[PATCH -v4 9/9] migrate_pages: move THP/hugetlb migration support check to simplify code
Commit Message
This is a code cleanup patch; no functionality change is expected.
After the change, the line count is reduced, especially in the long
migrate_pages_batch().
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Alistair Popple <apopple@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Bharata B Rao <bharata@amd.com>
Cc: haoxin <xhao@linux.alibaba.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
mm/migrate.c | 83 +++++++++++++++++++++++-----------------------------
1 file changed, 36 insertions(+), 47 deletions(-)
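
For readers outside mm/, the shape of the cleanup is: instead of the
per-page worker returning -ENOSYS for an unsupported page type and every
caller decoding that in its retry switch, the caller checks support once
before calling, and the -ENOSYS case disappears from both the worker and
the switch. Below is a minimal userspace sketch of that pattern, not the
kernel code; all names in it (supported(), migrate_one(), struct item)
are hypothetical stand-ins:

	#include <stdio.h>

	struct item { int huge; };

	/* Stand-in for thp_migration_supported() /
	 * hugepage_migration_supported().
	 */
	static int supported(const struct item *it)
	{
		return !it->huge;
	}

	static int migrate_one(const struct item *it)
	{
		/*
		 * After the cleanup this function no longer needs
		 * "if (!supported(it)) return -ENOSYS;" at its top.
		 */
		(void)it;
		return 0;
	}

	int main(void)
	{
		struct item items[] = { { 0 }, { 1 }, { 0 } };
		int i, rc, nr_failed = 0;

		for (i = 0; i < 3; i++) {
			/* Support check moved into the caller's loop. */
			if (!supported(&items[i])) {
				nr_failed++;
				continue;
			}
			rc = migrate_one(&items[i]);
			switch (rc) {
			case 0:
				break;
			default:
				/* -ENOMEM etc. would be handled here;
				 * no -ENOSYS case remains.
				 */
				nr_failed++;
				break;
			}
		}
		printf("nr_failed: %d\n", nr_failed);
		return 0;
	}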
Comments
On 6 Feb 2023, at 1:33, Huang Ying wrote:
> This is a code cleanup patch; no functionality change is expected.
> After the change, the line count is reduced, especially in the long
> migrate_pages_batch().
>
> Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
> Suggested-by: Alistair Popple <apopple@nvidia.com>
> Cc: Zi Yan <ziy@nvidia.com>
> Cc: Yang Shi <shy828301@gmail.com>
> Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
> Cc: Oscar Salvador <osalvador@suse.de>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Bharata B Rao <bharata@amd.com>
> Cc: haoxin <xhao@linux.alibaba.com>
> Cc: Minchan Kim <minchan@kernel.org>
> Cc: Mike Kravetz <mike.kravetz@oracle.com>
> Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
> mm/migrate.c | 83 +++++++++++++++++++++++-----------------------------
> 1 file changed, 36 insertions(+), 47 deletions(-)
>
LGTM. Thanks.

Reviewed-by: Zi Yan <ziy@nvidia.com>
--
Best Regards,
Yan, Zi
@@ -1099,9 +1099,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
bool locked = false;
bool dst_locked = false;
- if (!thp_migration_supported() && folio_test_transhuge(src))
- return -ENOSYS;
-
if (folio_ref_count(src) == 1) {
/* Folio was freed from under us. So we are done. */
folio_clear_active(src);
@@ -1359,16 +1356,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
- /*
- * Migratability of hugepages depends on architectures and their size.
- * This check is necessary because some callers of hugepage migration
- * like soft offline and memory hotremove don't walk through page
- * tables or check whether the hugepage is pmd-based or not before
- * kicking migration.
- */
- if (!hugepage_migration_supported(page_hstate(hpage)))
- return -ENOSYS;
-
if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
putback_active_hugepage(hpage);
@@ -1535,6 +1522,20 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
cond_resched();
+ /*
+ * Migratability of hugepages depends on architectures and
+ * their size. This check is necessary because some callers
+ * of hugepage migration like soft offline and memory
+ * hotremove don't walk through page tables or check whether
+ * the hugepage is pmd-based or not before kicking migration.
+ */
+ if (!hugepage_migration_supported(folio_hstate(folio))) {
+ nr_failed++;
+ stats->nr_failed_pages += nr_pages;
+ list_move_tail(&folio->lru, ret_folios);
+ continue;
+ }
+
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private,
&folio->page, pass > 2, mode,
@@ -1544,16 +1545,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
* Success: hugetlb folio will be put back
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
- * -ENOSYS: stay on the from list
* Other errno: put on ret_folios list
*/
switch(rc) {
- case -ENOSYS:
- /* Hugetlb migration is unsupported */
- nr_failed++;
- stats->nr_failed_pages += nr_pages;
- list_move_tail(&folio->lru, ret_folios);
- break;
case -ENOMEM:
/*
* When memory is low, don't bother to try to migrate
@@ -1639,6 +1633,28 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
cond_resched();
+ /*
+ * Large folio migration might be unsupported or
+ * the allocation could've failed so we should retry
+ * on the same folio with the large folio split
+ * to normal folios.
+ *
+ * Split folios are put in split_folios, and
+ * we will migrate them after the rest of the
+ * list is processed.
+ */
+ if (!thp_migration_supported() && is_thp) {
+ nr_large_failed++;
+ stats->nr_thp_failed++;
+ if (!try_split_folio(folio, &split_folios)) {
+ stats->nr_thp_split++;
+ continue;
+ }
+ stats->nr_failed_pages += nr_pages;
+ list_move_tail(&folio->lru, ret_folios);
+ continue;
+ }
+
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
folio, &dst, pass > 2, force_lock,
mode, reason, ret_folios);
@@ -1650,36 +1666,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
* -EAGAIN: stay on the from list
* -EDEADLOCK: stay on the from list
* -ENOMEM: stay on the from list
- * -ENOSYS: stay on the from list
* Other errno: put on ret_folios list
*/
switch(rc) {
- /*
- * Large folio migration might be unsupported or
- * the allocation could've failed so we should retry
- * on the same folio with the large folio split
- * to normal folios.
- *
- * Split folios are put in split_folios, and
- * we will migrate them after the rest of the
- * list is processed.
- */
- case -ENOSYS:
- /* Large folio migration is unsupported */
- if (is_large) {
- nr_large_failed++;
- stats->nr_thp_failed += is_thp;
- if (!try_split_folio(folio, &split_folios)) {
- stats->nr_thp_split += is_thp;
- break;
- }
- } else if (!no_split_folio_counting) {
- nr_failed++;
- }
-
- stats->nr_failed_pages += nr_pages;
- list_move_tail(&folio->lru, ret_folios);
- break;
case -ENOMEM:
/*
* When memory is low, don't bother to try to migrate