[3/3] swap: Convert deactivate_page() to deactivate_folio()
Commit Message
deactivate_page() has already been converted to use folios internally; this
change converts it to take a folio argument directly instead of calling page_folio().
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
include/linux/swap.h | 2 +-
mm/damon/paddr.c | 2 +-
mm/madvise.c | 4 ++--
mm/swap.c | 14 ++++++--------
4 files changed, 10 insertions(+), 12 deletions(-)
Comments
On Tue, Dec 06, 2022 at 04:21:58PM -0800, Vishal Moola (Oracle) wrote:
> /*
> - * deactivate_page - deactivate a page
> - * @page: page to deactivate
> + * deactivate_folio - deactivate a folio
> + * @folio: folio to deactivate
> *
> - * deactivate_page() moves @page to the inactive list if @page was on the active
> - * list and was not an unevictable page. This is done to accelerate the reclaim
> - * of @page.
> + * deactivate_folio() moves @folio to the inactive list if @folio was on the
> + * active list and was not an unevictable page. This is done to accelerate
... and was not unevictable. This ...
> + * the reclaim of @folio.
> */
> -void deactivate_page(struct page *page)
> +void deactivate_folio(struct folio *folio)
> {
> - struct folio *folio = page_folio(page);
> -
> if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
> (folio_test_active(folio) || lru_gen_enabled())) {
> struct folio_batch *fbatch;
> --
> 2.38.1
>
>
@@ -409,7 +409,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
+extern void deactivate_folio(struct folio *folio);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
@@ -247,7 +247,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (mark_accessed)
folio_mark_accessed(folio);
else
- deactivate_page(&folio->page);
+ deactivate_folio(folio);
folio_put(folio);
applied += folio_nr_pages(folio);
}
@@ -397,7 +397,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(&folio->page);
+ deactivate_folio(folio);
huge_unlock:
spin_unlock(ptl);
if (pageout)
@@ -487,7 +487,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
list_add(&folio->lru, &folio_list);
}
} else
- deactivate_page(&folio->page);
+ deactivate_folio(folio);
}
arch_leave_lazy_mmu_mode();
@@ -720,17 +720,15 @@ void deactivate_file_folio(struct folio *folio)
}
/*
- * deactivate_page - deactivate a page
- * @page: page to deactivate
+ * deactivate_folio - deactivate a folio
+ * @folio: folio to deactivate
*
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
> + * deactivate_folio() moves @folio to the inactive list if @folio was on the
> + * active list and was not unevictable. This is done to accelerate the
> + * reclaim of @folio.
*/
-void deactivate_page(struct page *page)
+void deactivate_folio(struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
(folio_test_active(folio) || lru_gen_enabled())) {
struct folio_batch *fbatch;