At present, NUMA balancing only supports base pages and PMD-mapped THP,
but it will be extended to migrate large folios and PTE-mapped THP in
the future. In preparation for that, make migrate_misplaced_page() take
a folio instead of a page and rename it to migrate_misplaced_folio().
This also removes several compound_head() calls.
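As a minimal sketch of the converted call-site pattern (mirroring the
do_numa_page() hunk below), callers now pass the folio of the faulting
page:

	/* Migrate to the requested node */
	if (migrate_misplaced_folio(page_folio(page), vma, target_nid))
		flags |= TNF_MIGRATED;
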
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/migrate.h |  4 ++--
 mm/huge_memory.c        |  2 +-
 mm/memory.c             |  2 +-
 mm/migrate.c            | 39 +++++++++++++++++++++------------------
 4 files changed, 25 insertions(+), 22 deletions(-)
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
 			   int node);
 #else
-static inline int migrate_misplaced_page(struct page *page,
+static inline int migrate_misplaced_folio(struct folio *folio,
 					 struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1540,7 +1540,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	spin_unlock(vmf->ptl);
 	writable = false;
 
-	migrated = migrate_misplaced_page(page, vma, target_nid);
+	migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid);
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4815,7 +4815,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	writable = false;
 
 	/* Migrate to the requested node */
-	if (migrate_misplaced_page(page, vma, target_nid)) {
+	if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
 	} else {
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2513,55 +2513,58 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 }
 
 /*
- * Attempt to migrate a misplaced page to the specified destination
+ * Attempt to migrate a misplaced folio to the specified destination
  * node. Caller is expected to have an elevated reference count on
- * the page that will be dropped by this function before returning.
+ * the folio that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-			   int node)
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+			    int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
-	int nr_pages = thp_nr_pages(page);
+	int nr_pages = folio_nr_pages(folio);
 
 	/*
-	 * Don't migrate file pages that are mapped in multiple processes
+	 * Don't migrate file folios that are mapped in multiple processes
 	 * with execute permissions as they are probably shared libraries.
+	 * To check if the folio is shared, ideally we want to make sure
+	 * every page is mapped to the same process. Doing that is very
+	 * expensive, so check the estimated mapcount of the folio instead.
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
 	    (vma->vm_flags & VM_EXEC))
 		goto out;
 
 	/*
-	 * Also do not migrate dirty pages as not all filesystems can move
-	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
+	 * Also do not migrate dirty folios as not all filesystems can move
+	 * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
 	 */
-	if (page_is_file_lru(page) && PageDirty(page))
+	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
 		goto out;
 
-	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
+	isolated = numamigrate_isolate_folio(pgdat, folio);
 	if (!isolated)
 		goto out;
 
-	list_add(&page->lru, &migratepages);
+	list_add(&folio->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
 				     MR_NUMA_MISPLACED, &nr_succeeded);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
-			list_del(&page->lru);
-			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -nr_pages);
-			putback_lru_page(page);
+			list_del(&folio->lru);
+			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+					folio_is_file_lru(folio), -nr_pages);
+			folio_putback_lru(folio);
 		}
 		isolated = 0;
 	}
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
 					    nr_succeeded);
 	}
@@ -2569,7 +2572,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	return isolated;
 
 out:
-	put_page(page);
+	folio_put(folio);
 	return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */