[v5,4/7] mm: Separate move/undo parts operating on a folio list from migrate_pages_batch()
Commit Message
Functionally, no change. This is a preparation for the migrc mechanism,
which requires separate folio lists for its own handling during migration.
Refactor migrate_pages_batch() by separating out the move and undo parts
that operate on a folio list into their own functions.
Signed-off-by: Byungchul Park <byungchul@sk.com>
---
mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
1 file changed, 83 insertions(+), 51 deletions(-)
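In outline, migrate_pages_batch() after this patch delegates its move and
cleanup phases to the two new helpers. Schematically (condensed from the
diff below; the "..." elides unchanged code):

	static int migrate_pages_batch(struct list_head *from, ...)
	{
		...
		/* unmap phase fills unmap_folios and dst_folios in lock step */
		for (pass = 0; pass < nr_pass && retry; pass++) {
			...
			/* Move the unmapped folios */
			migrate_folios_move(&unmap_folios, &dst_folios, ...);
		}
		...
	out:
		/* Cleanup remaining folios on the failure paths */
		migrate_folios_undo(&unmap_folios, &dst_folios, ...);
		return rc;
	}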
Comments
On Thu, Jan 11, 2024 at 03:07:54PM +0900, Byungchul Park wrote:
> +static void migrate_folios_move(struct list_head *src_folios,
> +		struct list_head *dst_folios,
> +		free_folio_t put_new_folio, unsigned long private,
> +		enum migrate_mode mode, int reason,
> +		struct list_head *ret_folios,
> +		struct migrate_pages_stats *stats,
> +		int *retry, int *thp_retry, int *nr_failed,
> +		int *nr_retry_pages)
> +{
> +	struct folio *folio, *folio2, *dst, *dst2;
> +	bool is_thp;
> +	int nr_pages;
> +	int rc;
> +
> +	dst = list_first_entry(dst_folios, struct folio, lru);
> +	dst2 = list_next_entry(dst, lru);
> +	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> +		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
You don't need to call folio_test_large() first. folio_order() includes
a call to test_large() so it can return 0.
> +		nr_pages = folio_nr_pages(folio);
... or since you're calculating this anyway,
is_thp = nr_pages >= HPAGE_PMD_NR
perhaps
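For reference, the combined simplification being suggested would reduce the
two lines above to something like this (a sketch, not code from the posted
series):

	nr_pages = folio_nr_pages(folio);
	/*
	 * folio_test_pmd_mappable() is folio_order(folio) >= HPAGE_PMD_ORDER,
	 * and folio_order() already returns 0 for small folios, so the
	 * folio_test_large() check is redundant.  And since folio_nr_pages()
	 * is 1 << folio_order(), comparing against HPAGE_PMD_NR says the
	 * same thing as folio_test_pmd_mappable().
	 */
	is_thp = nr_pages >= HPAGE_PMD_NR;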
On Thu, Jan 11, 2024 at 03:42:55PM +0000, Matthew Wilcox wrote:
> On Thu, Jan 11, 2024 at 03:07:54PM +0900, Byungchul Park wrote:
> > +static void migrate_folios_move(struct list_head *src_folios,
> > +		struct list_head *dst_folios,
> > +		free_folio_t put_new_folio, unsigned long private,
> > +		enum migrate_mode mode, int reason,
> > +		struct list_head *ret_folios,
> > +		struct migrate_pages_stats *stats,
> > +		int *retry, int *thp_retry, int *nr_failed,
> > +		int *nr_retry_pages)
> > +{
> > +	struct folio *folio, *folio2, *dst, *dst2;
> > +	bool is_thp;
> > +	int nr_pages;
> > +	int rc;
> > +
> > +	dst = list_first_entry(dst_folios, struct folio, lru);
> > +	dst2 = list_next_entry(dst, lru);
> > +	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> > +		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
>
> You don't need to call folio_test_large() first. folio_order() includes
> a call to test_large() so it can return 0.
>
> > +		nr_pages = folio_nr_pages(folio);
>
> ... or since you're calculating this anyway,
>
> is_thp = nr_pages >= HPAGE_PMD_NR
Cool. Thanks.
Byungchul
> perhaps
On Mon, Jan 15, 2024 at 11:08:18AM +0900, Byungchul Park wrote:
> On Thu, Jan 11, 2024 at 03:42:55PM +0000, Matthew Wilcox wrote:
> > On Thu, Jan 11, 2024 at 03:07:54PM +0900, Byungchul Park wrote:
> > > +static void migrate_folios_move(struct list_head *src_folios,
> > > +		struct list_head *dst_folios,
> > > +		free_folio_t put_new_folio, unsigned long private,
> > > +		enum migrate_mode mode, int reason,
> > > +		struct list_head *ret_folios,
> > > +		struct migrate_pages_stats *stats,
> > > +		int *retry, int *thp_retry, int *nr_failed,
> > > +		int *nr_retry_pages)
> > > +{
> > > +	struct folio *folio, *folio2, *dst, *dst2;
> > > +	bool is_thp;
> > > +	int nr_pages;
> > > +	int rc;
> > > +
> > > +	dst = list_first_entry(dst_folios, struct folio, lru);
> > > +	dst2 = list_next_entry(dst, lru);
> > > +	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> > > +		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
JFYI, in the v4 patch set I didn't change the original code that I
refactored; I just copied and pasted this part from the existing code.
> >
> > You don't need to call folio_test_large() first. folio_order() includes
> > a call to test_large() so it can return 0.
> >
> > > +		nr_pages = folio_nr_pages(folio);
> >
> > ... or since you're calculating this anyway,
> >
> > is_thp = nr_pages >= HPAGE_PMD_NR
I haven't applied your suggestion here to the next version yet because I
wasn't sure it would make the code more readable.
Thanks anyway!
Byungchul
> Cool. Thanks.
>
> Byungchul
>
> > perhaps
@@ -1611,6 +1611,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 	return nr_failed;
 }
+static void migrate_folios_move(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		enum migrate_mode mode, int reason,
+		struct list_head *ret_folios,
+		struct migrate_pages_stats *stats,
+		int *retry, int *thp_retry, int *nr_failed,
+		int *nr_retry_pages)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+	bool is_thp;
+	int nr_pages;
+	int rc;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+		nr_pages = folio_nr_pages(folio);
+
+		cond_resched();
+
+		rc = migrate_folio_move(put_new_folio, private,
+				folio, dst, mode,
+				reason, ret_folios);
+		/*
+		 * The rules are:
+		 *	Success: folio will be freed
+		 *	-EAGAIN: stay on the unmap_folios list
+		 *	Other errno: put on ret_folios list
+		 */
+		switch(rc) {
+		case -EAGAIN:
+			*retry += 1;
+			*thp_retry += is_thp;
+			*nr_retry_pages += nr_pages;
+			break;
+		case MIGRATEPAGE_SUCCESS:
+			stats->nr_succeeded += nr_pages;
+			stats->nr_thp_succeeded += is_thp;
+			break;
+		default:
+			*nr_failed += 1;
+			stats->nr_thp_failed += is_thp;
+			stats->nr_failed_pages += nr_pages;
+			break;
+		}
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		struct list_head *ret_folios)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		int old_page_state = 0;
+		struct anon_vma *anon_vma = NULL;
+
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				anon_vma, true, ret_folios);
+		list_del(&dst->lru);
+		migrate_folio_undo_dst(dst, true, put_new_folio, private);
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
@@ -1633,7 +1708,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int pass = 0;
 	bool is_thp = false;
 	bool is_large = false;
-	struct folio *folio, *folio2, *dst = NULL, *dst2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
@@ -1769,42 +1844,11 @@ static int migrate_pages_batch(struct list_head *from,
 		thp_retry = 0;
 		nr_retry_pages = 0;
-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-						folio, dst, mode,
-						reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1813,20 +1857,8 @@ static int migrate_pages_batch(struct list_head *from,
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int old_page_state = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
-				       anon_vma, true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);
 	return rc;
 }
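A detail worth noting in both helpers is the lock-step walk: dst/dst2 are
advanced by hand alongside list_for_each_entry_safe() on the source list,
because the loop body can unlink the current entry from both lists, so each
"next" pointer must be sampled before the body runs. Below is a minimal,
self-contained userspace sketch of that pattern; the list_head/list_entry
stand-ins only mimic <linux/list.h> for illustration and are not the
kernel's actual headers:

	#include <stdio.h>
	#include <stddef.h>

	/* Stripped-down stand-ins for the kernel's <linux/list.h>. */
	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	/* "folio" reduced to its lru link plus an id, for demonstration. */
	struct folio { struct list_head lru; int id; };

	/* Walk src and dst in lock step; the body unlinks both entries. */
	static void walk_pairs(struct list_head *src, struct list_head *dst_list)
	{
		struct folio *folio, *folio2, *dst, *dst2;

		/* Sample the first dst and its successor up front ... */
		dst = list_entry(dst_list->next, struct folio, lru);
		dst2 = list_entry(dst->lru.next, struct folio, lru);

		/* ... and use the "safe" idiom on src: folio2 is fetched
		 * before the body runs, so unlinking folio is fine. */
		for (folio = list_entry(src->next, struct folio, lru),
		     folio2 = list_entry(folio->lru.next, struct folio, lru);
		     &folio->lru != src;
		     folio = folio2,
		     folio2 = list_entry(folio->lru.next, struct folio, lru)) {
			printf("pair: src %d <-> dst %d\n", folio->id, dst->id);
			list_del(&folio->lru);
			list_del(&dst->lru);

			/* Advance dst by hand, mirroring what the safe
			 * idiom did for the src side. */
			dst = dst2;
			dst2 = list_entry(dst->lru.next, struct folio, lru);
		}
	}

	int main(void)
	{
		struct list_head src = LIST_HEAD_INIT(src);
		struct list_head dst = LIST_HEAD_INIT(dst);
		struct folio s[3], d[3];
		int i;

		for (i = 0; i < 3; i++) {
			s[i].id = i;
			d[i].id = 100 + i;
			list_add_tail(&s[i].lru, &src);
			list_add_tail(&d[i].lru, &dst);
		}
		walk_pairs(&src, &dst);	/* prints 0<->100, 1<->101, 2<->102 */
		return 0;
	}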