[RFC,04/14] mm: use virt_to_slab instead of folio_slab
Commit Message
From: Jann Horn <jannh@google.com>
This is a refactoring in preparation for the introduction of
SLAB_VIRTUAL, which does not implement folio_slab.

With SLAB_VIRTUAL there is no longer a 1:1 correspondence between slabs
and pages of physical memory used by the slab allocator. There is no way
to look up the slab that corresponds to a specific page of physical
memory without iterating over all slabs or over the page tables. Instead
of doing that, we can look up the slab from the object's virtual
address; that lookup remains cheap whether SLAB_VIRTUAL is enabled or
disabled.
Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
mm/memcontrol.c | 2 +-
mm/slab_common.c | 12 +++++++-----
mm/slub.c | 14 ++++++--------
3 files changed, 14 insertions(+), 14 deletions(-)
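For reference, the lookup this series leans on already exists as a
self-contained helper. Without SLAB_VIRTUAL, virt_to_slab() in
mm/slab.h is essentially the following (paraphrased from mainline of
this era; not part of this patch):

	static inline struct slab *virt_to_slab(const void *addr)
	{
		struct folio *folio = virt_to_folio(addr);

		/* Not slab memory (e.g. a large kmalloc allocation). */
		if (!folio_test_slab(folio))
			return NULL;

		return folio_slab(folio);
	}

A SLAB_VIRTUAL implementation can later supply its own definition that
resolves the slab directly from the virtual address without going
through a folio at all, which is why the hunks below funnel everything
through virt_to_slab() instead of folio_slab().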
Comments
On Fri, Sep 15, 2023 at 10:59:23AM +0000, Matteo Rizzo wrote:
> From: Jann Horn <jannh@google.com>
>
> This is a refactoring in preparation for the introduction of
> SLAB_VIRTUAL, which does not implement folio_slab.
>
> With SLAB_VIRTUAL there is no longer a 1:1 correspondence between slabs
> and pages of physical memory used by the slab allocator. There is no way
> to look up the slab that corresponds to a specific page of physical
> memory without iterating over all slabs or over the page tables. Instead
> of doing that, we can look up the slab from the object's virtual
> address; that lookup remains cheap whether SLAB_VIRTUAL is enabled or
> disabled.
>
> Signed-off-by: Jann Horn <jannh@google.com>
Refactoring continues to track.
Reviewed-by: Kees Cook <keescook@chromium.org>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2936,7 +2936,7 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
struct slab *slab;
unsigned int off;
- slab = folio_slab(folio);
+ slab = virt_to_slab(p);
objcgs = slab_objcgs(slab);
if (!objcgs)
return NULL;
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1062,13 +1062,13 @@ void kfree(const void *object)
if (unlikely(ZERO_OR_NULL_PTR(object)))
return;
- folio = virt_to_folio(object);
if (unlikely(!is_slab_addr(object))) {
+ folio = virt_to_folio(object);
free_large_kmalloc(folio, (void *)object);
return;
}
- slab = folio_slab(folio);
+ slab = virt_to_slab(object);
s = slab->slab_cache;
__kmem_cache_free(s, (void *)object, _RET_IP_);
}
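A note on is_slab_addr(), which kfree() now checks before touching any
folio: it is introduced earlier in this series. As a rough mental model
only (an assumption, not the series' actual definition), its
non-SLAB_VIRTUAL shape can be pictured as:

	/*
	 * Hypothetical sketch, not the series' code: without SLAB_VIRTUAL,
	 * "is this a slab address?" still reduces to a folio test on the
	 * backing page.
	 */
	static inline bool is_slab_addr(const void *addr)
	{
		return folio_test_slab(virt_to_folio(addr));
	}

With SLAB_VIRTUAL enabled, the same question can presumably be answered
by checking whether the address falls inside the dedicated slab virtual
region, so the slab path never needs the backing folio.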
@@ -1089,12 +1089,13 @@ EXPORT_SYMBOL(kfree);
size_t __ksize(const void *object)
{
struct folio *folio;
+ struct kmem_cache *s;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
- folio = virt_to_folio(object);
if (unlikely(!is_slab_addr(object))) {
+ folio = virt_to_folio(object);
if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
return 0;
if (WARN_ON(object != folio_address(folio)))
@@ -1102,11 +1103,12 @@ size_t __ksize(const void *object)
return folio_size(folio);
}
+ s = virt_to_slab(object)->slab_cache;
#ifdef CONFIG_SLUB_DEBUG
- skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+ skip_orig_size_check(s, object);
#endif
- return slab_ksize(folio_slab(folio)->slab_cache);
+ return slab_ksize(s);
}
void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
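For orientation before the last hunk: build_detached_freelist() is the
helper behind the bulk-free path, and kfree_bulk() reaches it with
s == NULL, which is exactly the "Handle kalloc'ed objects" branch
below. A minimal (hypothetical) caller looks like:

	/*
	 * Hypothetical illustration of the bulk-free API served by
	 * build_detached_freelist(); allocation error handling omitted.
	 */
	void *objs[8];
	size_t i;

	for (i = 0; i < ARRAY_SIZE(objs); i++)
		objs[i] = kmalloc(32, GFP_KERNEL);

	/* Groups the objects by slab internally before freeing them. */
	kfree_bulk(ARRAY_SIZE(objs), objs);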
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3848,25 +3848,23 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
{
int lookahead = 3;
void *object;
- struct folio *folio;
+ struct slab *slab;
size_t same;
object = p[--size];
- folio = virt_to_folio(object);
+ slab = virt_to_slab(object);
if (!s) {
/* Handle kalloc'ed objects */
- if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, object);
+ if (unlikely(slab == NULL)) {
+ free_large_kmalloc(virt_to_folio(object), object);
df->slab = NULL;
return size;
}
- /* Derive kmem_cache from object */
- df->slab = folio_slab(folio);
- df->s = df->slab->slab_cache;
+ df->s = slab->slab_cache;
} else {
- df->slab = folio_slab(folio);
df->s = cache_from_obj(s, object); /* Support for memcg */
}
+ df->slab = slab;
/* Start new detached freelist */
df->tail = object;