From: Kairui Song <kasong@tencent.com>
No longer needed after we switched to a per-entry swap readahead policy.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
include/linux/swap.h | 1 -
mm/swapfile.c | 11 -----------
2 files changed, 12 deletions(-)
Hi Kairui,
On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> No longer needed after we switched to a per-entry swap readahead policy.
This is a behavior-change patch; it would be better to separate it out from
the cleanup series for wider exposure and discussion.
I think the idea is reasonable. The policy should be made at the device
level rather than the system level. I saw Ying has some great feedback
regarding readahead crossing device boundaries.
Chris
@@ -454,7 +454,6 @@ extern void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
-extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
@@ -104,8 +104,6 @@ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);
-atomic_t nr_rotate_swap = ATOMIC_INIT(0);
-
static struct swap_info_struct *swap_type_to_swap_info(int type)
{
if (type >= MAX_SWAPFILES)
@@ -2486,9 +2484,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
- if (!p->bdev || !bdev_nonrot(p->bdev))
- atomic_dec(&nr_rotate_swap);
-
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
spin_lock(&p->lock);
@@ -2990,7 +2985,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
struct swap_cluster_info *cluster_info = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
- bool inced_nr_rotate_swap = false;
if (swap_flags & ~SWAP_FLAGS_VALID)
return -EINVAL;
@@ -3112,9 +3106,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
cluster = per_cpu_ptr(p->percpu_cluster, cpu);
cluster_set_null(&cluster->index);
}
- } else {
- atomic_inc(&nr_rotate_swap);
- inced_nr_rotate_swap = true;
}
error = swap_cgroup_swapon(p->type, maxpages);
@@ -3218,8 +3209,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
spin_unlock(&swap_lock);
vfree(swap_map);
kvfree(cluster_info);
- if (inced_nr_rotate_swap)
- atomic_dec(&nr_rotate_swap);
if (swap_file)
filp_close(swap_file, NULL);
out: