From: Jann Horn <jannh@google.com>

When SLAB_VIRTUAL is enabled, the new deallocated_pages sysfs attribute
tracks the number of slab pages whose physical memory has been reclaimed
but whose virtual memory is still allocated to a kmem_cache.

Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
 include/linux/slub_def.h |  4 +++-
 mm/slub.c                | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)
On Fri, Sep 15, 2023 at 10:59:31AM +0000, Matteo Rizzo wrote:
> From: Jann Horn <jannh@google.com>
>
> When SLAB_VIRTUAL is enabled, the new deallocated_pages sysfs attribute
> tracks the number of slab pages whose physical memory has been reclaimed
> but whose virtual memory is still allocated to a kmem_cache.
>
> Signed-off-by: Jann Horn <jannh@google.com>
Yay stats. :)
Reviewed-by: Kees Cook <keescook@chromium.org>
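For reference, the new attribute appears under each cache's directory in
the usual SLUB sysfs hierarchy. A minimal userspace sketch for reading it,
assuming a kmalloc-64 cache exists (any cache directory under
/sys/kernel/slab works the same way):

#include <stdio.h>

int main(void)
{
        /* Path assumed from the standard /sys/kernel/slab/<cache>/ layout. */
        const char *path = "/sys/kernel/slab/kmalloc-64/deallocated_pages";
        unsigned long pages;
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%lu", &pages) == 1)
                printf("%s: %lu pages\n", path, pages);
        fclose(f);
        return 0;
}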
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -88,7 +88,7 @@ struct kmem_cache_cpu {
*/
struct kmem_cache_virtual {
#ifdef CONFIG_SLAB_VIRTUAL
- /* Protects freed_slabs and freed_slabs_min */
+ /* Protects freed_slabs, freed_slabs_min, and nr_freed_pages */
spinlock_t freed_slabs_lock;
/*
* Slabs on this list have virtual memory of size oo allocated to them
@@ -97,6 +97,8 @@ struct kmem_cache_virtual {
struct list_head freed_slabs;
/* Same as freed_slabs but with memory of size min */
struct list_head freed_slabs_min;
+ /* Number of slab pages whose physical memory has been freed */
+ unsigned long nr_freed_pages;
#endif
};
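Note that the counter is kept in units of pages, not slabs: every update
in the mm/slub.c hunks below adjusts it by 1UL << slab_order(slab). A tiny
standalone sketch of that arithmetic (the order value is only an example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int order = 2;                 /* example slab order */
        unsigned long pages = 1UL << order;     /* pages covered by one slab */

        assert(pages == 4);                     /* an order-2 slab spans 4 pages */
        printf("order-%u slab = %lu pages\n", order, pages);
        return 0;
}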
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2110,6 +2110,8 @@ static struct slab *get_free_slab(struct kmem_cache *s,
if (likely(slab)) {
list_del(&slab->slab_list);
+ WRITE_ONCE(s->virtual.nr_freed_pages,
+ s->virtual.nr_freed_pages - (1UL << slab_order(slab)));
spin_unlock_irqrestore(&s->virtual.freed_slabs_lock, flags);
return slab;
@@ -2158,6 +2160,8 @@ static struct slab *alloc_slab_page(struct kmem_cache *s,
/* Rollback: put the struct slab back. */
spin_lock_irqsave(&s->virtual.freed_slabs_lock, flags);
list_add(&slab->slab_list, freed_slabs);
+ WRITE_ONCE(s->virtual.nr_freed_pages,
+ s->virtual.nr_freed_pages + (1UL << slab_order(slab)));
spin_unlock_irqrestore(&s->virtual.freed_slabs_lock, flags);
return NULL;
@@ -2438,6 +2442,8 @@ static void slub_tlbflush_worker(struct kthread_work *work)
WARN_ON(oo_order(slab->oo) != oo_order(s->min));
list_add(&slab->slab_list, &s->virtual.freed_slabs_min);
}
+ WRITE_ONCE(s->virtual.nr_freed_pages,
+ s->virtual.nr_freed_pages + (1UL << slab_order(slab)));
spin_unlock(&s->virtual.freed_slabs_lock);
}
spin_unlock_irqrestore(&slub_kworker_lock, irq_flags);
@@ -4924,6 +4930,7 @@ static inline void slab_virtual_open(struct kmem_cache *s)
spin_lock_init(&s->virtual.freed_slabs_lock);
INIT_LIST_HEAD(&s->virtual.freed_slabs);
INIT_LIST_HEAD(&s->virtual.freed_slabs_min);
+ s->virtual.nr_freed_pages = 0;
#endif
}
@@ -6098,6 +6105,14 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(objects_partial);
+#ifdef CONFIG_SLAB_VIRTUAL
+static ssize_t deallocated_pages_show(struct kmem_cache *s, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", READ_ONCE(s->virtual.nr_freed_pages));
+}
+SLAB_ATTR_RO(deallocated_pages);
+#endif /* CONFIG_SLAB_VIRTUAL */
+
static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
int objects = 0;
@@ -6424,6 +6439,9 @@ static struct attribute *slab_attrs[] = {
&min_partial_attr.attr,
&cpu_partial_attr.attr,
&objects_partial_attr.attr,
+#ifdef CONFIG_SLAB_VIRTUAL
+ &deallocated_pages_attr.attr,
+#endif
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
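A closing note on the concurrency scheme: every writer updates
nr_freed_pages while holding freed_slabs_lock, and deallocated_pages_show()
reads it locklessly; the WRITE_ONCE()/READ_ONCE() pairing only guarantees
that the reader never observes a torn value, not that it gets a
synchronized snapshot. A rough userspace analogue of the same pattern,
using C11 atomics in place of the kernel macros (all names here are
illustrative, not from the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong nr_freed_pages;     /* stands in for s->virtual.nr_freed_pages */

/* Writer side: serialized by the lock, like the freed_slabs_lock sections. */
static void account_freed_slab(unsigned int order)
{
        pthread_mutex_lock(&lock);
        unsigned long cur = atomic_load_explicit(&nr_freed_pages,
                                                 memory_order_relaxed);
        /* Relaxed store plays the role of WRITE_ONCE(): no torn values. */
        atomic_store_explicit(&nr_freed_pages, cur + (1UL << order),
                              memory_order_relaxed);
        pthread_mutex_unlock(&lock);
}

/* Reader side: lockless, like deallocated_pages_show(). */
static unsigned long read_freed_pages(void)
{
        /* Relaxed load plays the role of READ_ONCE(). */
        return atomic_load_explicit(&nr_freed_pages, memory_order_relaxed);
}

int main(void)
{
        account_freed_slab(3);  /* one freed order-3 slab accounts 8 pages */
        printf("%lu\n", read_freed_pages());
        return 0;
}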