[RESEND,2/3] mm/zsmalloc: remove migrate_write_lock_nested()
Commit Message
From: Chengming Zhou <zhouchengming@bytedance.com>
The migrate write lock protects against races between zspage migration
and the map users of zspage objects.

We only need to lock out the map users of the src zspage, not the dst
zspage: the dst zspage can safely be mapped by users concurrently,
since we only do obj_malloc() from the dst zspage, which does not move
any of its existing objects.

So we can remove the migrate_write_lock_nested() use case.

While at it, clean up __zs_compact() by moving putback_zspage()
outside of migrate_write_unlock(): since we hold the pool lock, no
malloc or free users can come in.
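
The resulting flow in __zs_compact() is roughly the following (a
simplified sketch; dst fullness handling and the pool lock contention
check are elided):

	while (zs_can_compact(class)) {
		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
			/* no write lock on dst: concurrent map users are
			 * fine since we only obj_malloc() from it */
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		/* exclude map users of src while its objects are moved */
		migrate_write_lock(src_zspage);
		migrate_zspage(pool, src_zspage, dst_zspage);
		migrate_write_unlock(src_zspage);

		/* pool->lock is held throughout, so putback can safely
		 * happen after dropping the zspage write lock */
		fg = putback_zspage(class, src_zspage);
		if (fg == ZS_INUSE_RATIO_0)
			free_zspage(pool, class, src_zspage);
	}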
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/zsmalloc.c | 22 +++++-----------------
1 file changed, 5 insertions(+), 17 deletions(-)
Comments
On (24/02/20 11:36), Chengming Zhou wrote:
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> The migrate write lock protects against races between zspage migration
> and the map users of zspage objects.
>
> We only need to lock out the map users of the src zspage, not the dst
> zspage: the dst zspage can safely be mapped by users concurrently,
> since we only do obj_malloc() from the dst zspage, which does not move
> any of its existing objects.
>
> So we can remove the migrate_write_lock_nested() use case.
>
> While at it, clean up __zs_compact() by moving putback_zspage()
> outside of migrate_write_unlock(): since we hold the pool lock, no
> malloc or free users can come in.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
@@ -279,7 +279,6 @@ static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
 static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
 
 #ifdef CONFIG_COMPACTION
@@ -1727,11 +1726,6 @@ static void migrate_write_lock(struct zspage *zspage)
 	write_lock(&zspage->lock);
 }
 
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
 static void migrate_write_unlock(struct zspage *zspage)
 {
 	write_unlock(&zspage->lock);
@@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 			dst_zspage = isolate_dst_zspage(class);
 			if (!dst_zspage)
 				break;
-			migrate_write_lock(dst_zspage);
 		}
 
 		src_zspage = isolate_src_zspage(class);
 		if (!src_zspage)
 			break;
 
-		migrate_write_lock_nested(src_zspage);
-
+		migrate_write_lock(src_zspage);
 		migrate_zspage(pool, src_zspage, dst_zspage);
-		fg = putback_zspage(class, src_zspage);
 		migrate_write_unlock(src_zspage);
+		fg = putback_zspage(class, src_zspage);
 
 		if (fg == ZS_INUSE_RATIO_0) {
 			free_zspage(pool, class, src_zspage);
 			pages_freed += class->pages_per_zspage;
@@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
 		    || spin_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&pool->lock);
@@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct zs_pool *pool,
 		}
 	}
 
-	if (src_zspage) {
+	if (src_zspage)
 		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}
 
-	if (dst_zspage) {
+	if (dst_zspage)
 		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
-	}
+
 	spin_unlock(&pool->lock);
 
 	return pages_freed;
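
For reference, the "map users" that the src write lock excludes take
this lock on the read side; zs_map_object()/zs_unmap_object() of this
era look roughly like this (abridged paraphrase of mm/zsmalloc.c, not
part of this patch):

	/* zs_map_object(), abridged */
	spin_lock(&pool->lock);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any zpages in this zspage */
	migrate_read_lock(zspage);
	spin_unlock(&pool->lock);

	/* ... map and return the object ... */

	/* zs_unmap_object(), abridged */
	migrate_read_unlock(zspage);

So write-locking the src zspage waits for, and blocks, in-flight
mappings of its objects, while mappings of dst zspage objects can
proceed in parallel with compaction.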