[v2] zram: add size-class equality check to recompression
Commit Message
It makes no sense to recompress the object if it will end up
in the same size class: we don't get any memory gain, but we
do pay a CPU time overhead when inserting the object into a
zspage and when decompressing it afterwards.
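To make this concrete, here is a minimal userspace model of zsmalloc's
size-class rounding (assuming 4 KiB pages with the usual 16-byte class
step and 32-byte minimum allocation; class_index() below is a
simplified stand-in for illustration, not the kernel helper):

#include <stdio.h>

/* Simplified model of zsmalloc class indexing: classes step by
 * 16 bytes above a 32-byte minimum allocation size. */
static unsigned int class_index(unsigned int size)
{
	if (size <= 32)
		return 0;
	return (size - 32 + 15) / 16;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* Recompression shrinks the object from 1000 to 996 bytes, yet
	 * both lengths round up into the same class, so no memory is
	 * saved and the CPU time is wasted. */
	printf("1000 bytes -> class %u\n", class_index(1000)); /* 61 */
	printf(" 996 bytes -> class %u\n", class_index(996));  /* 61 */
	return 0;
}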
Signed-off-by: Alexey Romanov <avromanov@sberdevices.ru>
---
 drivers/block/zram/zram_drv.c |  5 +++++
 include/linux/zsmalloc.h      |  2 ++
 mm/zsmalloc.c                 | 21 +++++++++++++++++++++
3 files changed, 28 insertions(+)
Comments
On (22/10/25 19:26), Alexey Romanov wrote:
> +/**
> + * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
> + * that holds objects of the provided size.
> + * @pool: zsmalloc pool to use
> + * @size: object size
> + *
> + * Context: Any context.
> + *
> + * Return: the index of the zsmalloc &size_class that holds objects of
> + * the provided size.
> + */
> +unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
> +{
> + struct size_class *class;
> +
> + class = pool->size_class[get_size_class_index(pool, size)];
> +
> + return class->index;
> +}
> +EXPORT_SYMBOL_GPL(zs_lookup_class_index);
I cherry-picked it with one tiny tweak: I want this to land ahead of
my series (to break the dependency on my series). So I removed the
pool parameter from `get_size_class_index(pool, size)` in this patch
and will add it back in my series (when I change the
get_size_class_index() prototype).
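For reference, the cherry-picked variant would then read roughly as
follows (a sketch assuming the pre-series get_size_class_index(size)
prototype, which takes no pool argument):

unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
{
	struct size_class *class;

	/* pre-series prototype: the class index depends only on size */
	class = pool->size_class[get_size_class_index(size)];

	return class->index;
}
EXPORT_SYMBOL_GPL(zs_lookup_class_index);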
@@ -1671,6 +1671,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
unsigned long handle_next;
unsigned int comp_len_next;
unsigned int comp_len_prev;
+ unsigned int class_index_prev;
+ unsigned int class_index_next;
struct zcomp_strm *zstrm;
void *src, *dst;
int ret;
@@ -1695,6 +1697,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
ret = zcomp_compress(zstrm, src, &comp_len_next);
kunmap_atomic(src);
+ class_index_prev = zs_lookup_class_index(zram->mem_pool, comp_len_prev);
+ class_index_next = zs_lookup_class_index(zram->mem_pool, comp_len_next);
/*
* Either a compression error or we failed to compress the object
* in a way that will save us memory. Mark the object so that we
@@ -1702,6 +1706,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
*/
if (comp_len_next >= huge_class_size ||
comp_len_next >= comp_len_prev ||
+ class_index_next >= class_index_prev ||
ret) {
zram_set_flag(zram, index, ZRAM_RECOMP_SKIP);
zram_clear_flag(zram, index, ZRAM_IDLE);
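Worth noting (my reading of the code, not a claim from the thread):
get_size_class_index() is monotonic in size, so the class-index test
subsumes the plain length test; what it adds is the case where
comp_len_next is strictly smaller than comp_len_prev but still rounds
up into the same class. The combined condition effectively reads:

	if (comp_len_next >= huge_class_size ||	    /* still a huge object */
	    comp_len_next >= comp_len_prev ||	    /* no byte-level gain  */
	    class_index_next >= class_index_prev || /* no class-level gain */
	    ret)				    /* compression error   */
		/* mark ZRAM_RECOMP_SKIP and keep the object as is */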
@@ -68,5 +68,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
+unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
#endif
@@ -1209,6 +1209,27 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage)
return get_zspage_inuse(zspage) == class->objs_per_zspage;
}
+/**
+ * zs_lookup_class_index() - Returns index of the zsmalloc &size_class
+ * that holds objects of the provided size.
+ * @pool: zsmalloc pool to use
+ * @size: object size
+ *
+ * Context: Any context.
+ *
+ * Return: the index of the zsmalloc &size_class that holds objects of
+ * the provided size.
+ */
+unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
+{
+ struct size_class *class;
+
+ class = pool->size_class[get_size_class_index(pool, size)];
+
+ return class->index;
+}
+EXPORT_SYMBOL_GPL(zs_lookup_class_index);
+
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
return atomic_long_read(&pool->pages_allocated);