[v2,1/2] mm: swap: enforce updating inuse_pages at the end of swap_range_free()

Message ID 20240124045113.415378-2-yosryahmed@google.com
State New
Series mm: zswap: simplify zswap_swapoff()

Commit Message

Yosry Ahmed Jan. 24, 2024, 4:51 a.m. UTC
  In swap_range_free(), we update inuse_pages then do some cleanups (arch
invalidation, zswap invalidation, swap cache cleanups, etc). During
swapoff, try_to_unuse() checks that inuse_pages is 0 to make sure all
swap entries are freed. Make sure we only update inuse_pages after we
are done with the cleanups in swap_range_free(), and use the proper
memory barriers to enforce it. This makes sure that code following
try_to_unuse() can safely assume that swap_range_free() ran for all
entries in the swapfile (e.g. swap cache cleanup, zswap_swapoff()).

In practice, this currently isn't a problem because swap_range_free() is
called with the swap info lock held, and the swapoff code happens to
spin for that after try_to_unuse(). However, this seems fragile and
unintentional, so make it more reliable and future-proof. This also
facilitates a following simplification of zswap_swapoff().

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
---
 mm/swapfile.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
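
As a rough user-space model of the ordering described above: in the sketch
below, C11 atomic_thread_fence(memory_order_release) stands in for the
kernel's smp_wmb(), a seq_cst fence stands in for smp_mb(), and relaxed
atomics stand in for WRITE_ONCE()/READ_ONCE(). The names used here
(range_free_model, unuse_model, cleanups_done) are illustrative stand-ins,
not kernel symbols; this is a sketch of the barrier pairing, not the kernel
implementation.

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>

	static atomic_long inuse_pages = 1;	/* models si->inuse_pages */
	static long cleanups_done;		/* models zswap/swap-cache cleanup state */

	/* Models swap_range_free(): finish the cleanups, then publish inuse_pages = 0. */
	static void *range_free_model(void *arg)
	{
		(void)arg;
		cleanups_done = 1;				/* cleanups (zswap, swap cache, ...) */
		atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
		atomic_store_explicit(&inuse_pages, 0,		/* ~ WRITE_ONCE() */
				      memory_order_relaxed);
		return NULL;
	}

	/* Models try_to_unuse() and the swapoff work that follows it. */
	static void *unuse_model(void *arg)
	{
		(void)arg;
		while (atomic_load_explicit(&inuse_pages,	/* ~ READ_ONCE() */
					    memory_order_relaxed))
			;
		atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
		assert(cleanups_done == 1);	/* cleanups are guaranteed visible here */
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, range_free_model, NULL);
		pthread_create(&t2, NULL, unuse_model, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}

In this model, dropping the release fence in range_free_model() would allow
the assert in unuse_model() to fail on a weakly ordered machine, which is the
kind of reordering the barriers in this patch rule out.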
  

Comments

Huang, Ying Jan. 24, 2024, 5:20 a.m. UTC | #1
Yosry Ahmed <yosryahmed@google.com> writes:

> In swap_range_free(), we update inuse_pages then do some cleanups (arch
> invalidation, zswap invalidation, swap cache cleanups, etc). During
> swapoff, try_to_unuse() checks that inuse_pages is 0 to make sure all
> swap entries are freed. Make sure we only update inuse_pages after we
> are done with the cleanups in swap_range_free(), and use the proper
> memory barriers to enforce it. This makes sure that code following
> try_to_unuse() can safely assume that swap_range_free() ran for all
> entries in the swapfile (e.g. swap cache cleanup, zswap_swapoff()).
>
> In practice, this currently isn't a problem because swap_range_free() is
> called with the swap info lock held, and the swapoff code happens to
> spin for that after try_to_unuse(). However, this seems fragile and
> unintentional, so make it more reliable and future-proof. This also
> facilitates a following simplification of zswap_swapoff().
>
> Signed-off-by: Yosry Ahmed <yosryahmed@google.com>

LGTM, Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

> ---
>  mm/swapfile.c | 18 +++++++++++++++---
>  1 file changed, 15 insertions(+), 3 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index b11b6057d8b5f..0580bb3e34d77 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -737,8 +737,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
>  		if (was_full && (si->flags & SWP_WRITEOK))
>  			add_to_avail_list(si);
>  	}
> -	atomic_long_add(nr_entries, &nr_swap_pages);
> -	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
>  	if (si->flags & SWP_BLKDEV)
>  		swap_slot_free_notify =
>  			si->bdev->bd_disk->fops->swap_slot_free_notify;
> @@ -752,6 +750,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
>  		offset++;
>  	}
>  	clear_shadow_from_swap_cache(si->type, begin, end);
> +
> +	/*
> +	 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
> +	 * only after the above cleanups are done.
> +	 */
> +	smp_wmb();
> +	atomic_long_add(nr_entries, &nr_swap_pages);
> +	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
>  }
>  
>  static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
> @@ -2049,7 +2055,7 @@ static int try_to_unuse(unsigned int type)
>  	unsigned int i;
>  
>  	if (!READ_ONCE(si->inuse_pages))
> -		return 0;
> +		goto success;
>  
>  retry:
>  	retval = shmem_unuse(type);
> @@ -2130,6 +2136,12 @@ static int try_to_unuse(unsigned int type)
>  		return -EINTR;
>  	}
>  
> +success:
> +	/*
> +	 * Make sure that further cleanups after try_to_unuse() returns happen
> +	 * after swap_range_free() reduces si->inuse_pages to 0.
> +	 */
> +	smp_mb();
>  	return 0;
>  }
  

Patch

diff --git a/mm/swapfile.c b/mm/swapfile.c
index b11b6057d8b5f..0580bb3e34d77 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -737,8 +737,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 		if (was_full && (si->flags & SWP_WRITEOK))
 			add_to_avail_list(si);
 	}
-	atomic_long_add(nr_entries, &nr_swap_pages);
-	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
 	if (si->flags & SWP_BLKDEV)
 		swap_slot_free_notify =
 			si->bdev->bd_disk->fops->swap_slot_free_notify;
@@ -752,6 +750,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 		offset++;
 	}
 	clear_shadow_from_swap_cache(si->type, begin, end);
+
+	/*
+	 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
+	 * only after the above cleanups are done.
+	 */
+	smp_wmb();
+	atomic_long_add(nr_entries, &nr_swap_pages);
+	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
 }
 
 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
@@ -2049,7 +2055,7 @@ static int try_to_unuse(unsigned int type)
 	unsigned int i;
 
 	if (!READ_ONCE(si->inuse_pages))
-		return 0;
+		goto success;
 
 retry:
 	retval = shmem_unuse(type);
@@ -2130,6 +2136,12 @@ static int try_to_unuse(unsigned int type)
 		return -EINTR;
 	}
 
+success:
+	/*
+	 * Make sure that further cleanups after try_to_unuse() returns happen
+	 * after swap_range_free() reduces si->inuse_pages to 0.
+	 */
+	smp_mb();
 	return 0;
 }