[RFC,v2,4/6] slub: Simplify acquire_slab()

Message ID 20231021144317.3400916-5-chengming.zhou@linux.dev
State New
Series slub: Delay freezing of CPU partial slabs

Commit Message

Chengming Zhou Oct. 21, 2023, 2:43 p.m. UTC
  From: Chengming Zhou <zhouchengming@bytedance.com>

Now acquire_slab() is only reached under the "if (!object)" check in
get_partial_node(), so its "mode" argument (object == NULL) is always
true and the !mode branch is dead code. Drop the argument and simplify
acquire_slab().

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/slub.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)
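
[Editor's note, after the cut line: a minimal user-space sketch of why the
 mode argument could be dropped. The struct below is an illustrative stub,
 not the real mm/slub.c slab layout, and the per-node locking plus the
 cmpxchg-based freelist update of the real acquire_slab() are omitted; it
 only shows that the sole caller reaches acquire_slab() under an
 "if (!object)" guard, so mode = (object == NULL) was constantly true.]

	#include <stdio.h>

	/* Stubbed slab: only the fields touched by the sketch. */
	struct slab {
		void *freelist;
		unsigned int inuse;
		unsigned int objects;
		unsigned int frozen;
	};

	/* Post-patch shape: no "mode" parameter, always take the whole
	 * freelist and freeze the slab (locking/cmpxchg left out).
	 */
	static void *acquire_slab(struct slab *slab)
	{
		void *freelist = slab->freelist;

		slab->inuse = slab->objects;	/* take all objects */
		slab->freelist = NULL;
		slab->frozen = 1;
		return freelist;
	}

	int main(void)
	{
		int obj;
		struct slab s = { .freelist = &obj, .objects = 8 };
		void *object = NULL;

		if (!object)	/* the only call site: object is NULL here */
			object = acquire_slab(&s);

		printf("acquired=%s frozen=%u inuse=%u\n",
		       object ? "yes" : "no", s.frozen, s.inuse);
		return 0;
	}
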
  

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 61ee82ea21b6..9f0b80fefc70 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2222,8 +2222,7 @@  static void *alloc_single_from_new_slab(struct kmem_cache *s,
  * Returns a list of objects or NULL if it fails.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct slab *slab,
-		int mode)
+		struct kmem_cache_node *n, struct slab *slab)
 {
 	void *freelist;
 	unsigned long counters;
@@ -2239,12 +2238,8 @@  static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = slab->freelist;
 	counters = slab->counters;
 	new.counters = counters;
-	if (mode) {
-		new.inuse = slab->objects;
-		new.freelist = NULL;
-	} else {
-		new.freelist = freelist;
-	}
+	new.inuse = slab->objects;
+	new.freelist = NULL;
 
 	VM_BUG_ON(new.frozen);
 	new.frozen = 1;
@@ -2306,7 +2301,7 @@  static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		}
 
 		if (!object) {
-			t = acquire_slab(s, n, slab, object == NULL);
+			t = acquire_slab(s, n, slab);
 			if (t) {
 				*pc->slab = slab;
 				stat(s, ALLOC_FROM_PARTIAL);