[18/20] mm/slub: remove slab_alloc() and __kmem_cache_alloc_lru() wrappers

Message ID 20231113191340.17482-40-vbabka@suse.cz
State New
Series: remove the SLAB allocator

Commit Message

Vlastimil Babka Nov. 13, 2023, 7:13 p.m. UTC
  slab_alloc() is a thin wrapper around slab_alloc_node() with only one
caller.  Replace it with a direct call of slab_alloc_node().
__kmem_cache_alloc_lru() itself is a thin wrapper with two callers,
so replace it with direct calls of slab_alloc_node() and
trace_kmem_cache_alloc().

This also makes sure _RET_IP_ always has the expected value and does
not depend on inlining decisions.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)
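
As a standalone illustration of the _RET_IP_ point in the commit message
(not part of the patch): in the kernel, _RET_IP_ expands to
(unsigned long)__builtin_return_address(0), so the frame in which it is
evaluated determines which call site it reports. Below is a minimal
userspace sketch assuming only the GCC/Clang builtin; the function names
are made-up stand-ins for the kernel ones.

/*
 * Minimal userspace sketch of the _RET_IP_ argument above.  If the
 * builtin is evaluated in a helper like the removed
 * __kmem_cache_alloc_lru(), the reported call site depends on whether
 * the compiler inlines that helper into the exported function; if it is
 * evaluated directly in the exported function, it always points at the
 * real caller.
 */
#include <stdio.h>

#define noinline __attribute__((noinline))

/* Stand-in for the removed __kmem_cache_alloc_lru() wrapper. */
static noinline void wrapper(void)
{
	/*
	 * Not inlined here, so this reports an address inside api_old();
	 * had the compiler inlined the wrapper, it would report an
	 * address inside main() instead.
	 */
	printf("old call site: %p\n", __builtin_return_address(0));
}

/* Stand-in for the exported entry point before the patch. */
static noinline void api_old(void)
{
	wrapper();
}

/* Stand-in for the exported entry point after the patch. */
static noinline void api_new(void)
{
	/* Evaluated in the entry point itself: always points into main(). */
	printf("new call site: %p\n", __builtin_return_address(0));
}

int main(void)
{
	api_old();	/* reported call site is inside api_old() */
	api_new();	/* reported call site is inside main() */
	return 0;
}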
  

Comments

Kees Cook Nov. 14, 2023, 4:50 a.m. UTC | #1
On Mon, Nov 13, 2023 at 08:13:59PM +0100, Vlastimil Babka wrote:
> slab_alloc() is a thin wrapper around slab_alloc_node() with only one
> caller.  Replace it with a direct call of slab_alloc_node().
> __kmem_cache_alloc_lru() itself is a thin wrapper with two callers,
> so replace it with direct calls of slab_alloc_node() and
> trace_kmem_cache_alloc().

I'd have thought that with 2 callers a wrapper would still be useful?

> 
> This also makes sure _RET_IP_ always has the expected value and does
> not depend on inlining decisions.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> [...]
>  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
>  {
> -	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
> +	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_,
> +				    s->object_size);
>  

Whitespace change here isn't mentioned in the commit log.

Regardless:

Reviewed-by: Kees Cook <keescook@chromium.org>
  
Vlastimil Babka Nov. 14, 2023, 8:31 p.m. UTC | #2
On 11/14/23 05:50, Kees Cook wrote:
> On Mon, Nov 13, 2023 at 08:13:59PM +0100, Vlastimil Babka wrote:
>> slab_alloc() is a thin wrapper around slab_alloc_node() with only one
>> caller.  Replace it with a direct call of slab_alloc_node().
>> __kmem_cache_alloc_lru() itself is a thin wrapper with two callers,
>> so replace it with direct calls of slab_alloc_node() and
>> trace_kmem_cache_alloc().
> 
> I'd have thought that with 2 callers a wrapper would still be useful?

Well, it bothered me how many layers everything went through; it makes
the code harder to comprehend.

>> 
>> This also makes sure _RET_IP_ always has the expected value and does
>> not depend on inlining decisions.

And there's also this argument. We should evaluate _RET_IP_ in
kmem_cache_alloc() and kmem_cache_alloc_lru().

>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> [...]
>>  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
>>  {
>> -	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
>> +	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_,
>> +				    s->object_size);
>>  
> 
> Whitespace change here isn't mentioned in the commit log.

OK, will mention.

> Regardless:
> 
> Reviewed-by: Kees Cook <keescook@chromium.org>

Thanks!
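
For orientation, a condensed sketch of the call chains before and after
this change, reconstructed from the diff below:

/*
 * Before (from the removed code):
 *   kmem_cache_alloc()       -> __kmem_cache_alloc_lru(s, NULL, flags)
 *   kmem_cache_alloc_lru()   -> __kmem_cache_alloc_lru(s, lru, flags)
 *   __kmem_cache_alloc_lru() -> slab_alloc()          [_RET_IP_ taken here]
 *   slab_alloc()             -> slab_alloc_node(..., NUMA_NO_NODE, ...)
 *
 * After (both exported entry points take _RET_IP_ themselves and call
 * trace_kmem_cache_alloc() directly):
 *   kmem_cache_alloc()       -> slab_alloc_node(s, NULL, flags, NUMA_NO_NODE, ...)
 *   kmem_cache_alloc_lru()   -> slab_alloc_node(s, lru, flags, NUMA_NO_NODE, ...)
 */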
  

Patch

diff --git a/mm/slub.c b/mm/slub.c
index b44243e7cc5e..d2363b91d55c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3821,39 +3821,33 @@  static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
 	return object;
 }
 
-static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
-		gfp_t gfpflags, unsigned long addr, size_t orig_size)
-{
-	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
-}
-
-static __fastpath_inline
-void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
-			     gfp_t gfpflags)
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
+	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
+				    s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
 
 	return ret;
 }
-
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
-{
-	return __kmem_cache_alloc_lru(s, NULL, gfpflags);
-}
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags)
 {
-	return __kmem_cache_alloc_lru(s, lru, gfpflags);
+	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
+				    s->object_size);
+
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
 
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
+	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_,
+				    s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);