[v1,4/4] mm/compaction: optimize >0 order folio compaction by sorting source pages

Message ID: 20231113170157.280181-5-zi.yan@sent.com
State: New
Series: Enable >0 order folio memory compaction

Commit Message

Zi Yan Nov. 13, 2023, 5:01 p.m. UTC
  From: Zi Yan <ziy@nvidia.com>

Sort the source folios in cc->migratepages by descending folio order
before calling migrate_pages(), so that high order folios are migrated
first. This should maximize high order free page use and minimize free
page splits: for example (orders here are illustrative), a migrate list
holding folios of orders [0, 2, 0, 3] is reordered to [3, 2, 0, 0], so
the order-3 and order-2 destinations are allocated while the largest
free pages are still intact. It might be useful before free page
merging is implemented.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/compaction.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
  

Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index 9c083e6b399a..91809bee5422 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -127,6 +127,40 @@ static unsigned long release_free_list(struct page_list *freepages)
 }
 
 #ifdef CONFIG_COMPACTION
+
+/*
+ * Sort folios on @pages by descending folio order, so that high order
+ * folios are migrated first and are more likely to reuse high order
+ * free pages without splitting them.
+ */
+static void sort_folios_by_order(struct list_head *pages)
+{
+	struct page_list page_list[MAX_ORDER + 1];
+	int order;
+	struct folio *folio, *next;
+
+	for (order = 0; order <= MAX_ORDER; order++)
+		init_page_list(&page_list[order]);
+
+	/* Bucket each folio by its order. */
+	list_for_each_entry_safe(folio, next, pages, lru) {
+		order = folio_order(folio);
+
+		if (order > MAX_ORDER)
+			continue;
+
+		list_move(&folio->lru, &page_list[order].pages);
+		page_list[order].nr_pages++;
+	}
+
+	/* Splice the buckets back onto @pages, highest order first. */
+	for (order = MAX_ORDER; order >= 0; order--) {
+		list_for_each_entry_safe(folio, next,
+					 &page_list[order].pages, lru)
+			list_move_tail(&folio->lru, pages);
+	}
+}
+
 bool PageMovable(struct page *page)
 {
 	const struct movable_operations *mops;
@@ -2639,6 +2673,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 				pageblock_start_pfn(cc->migrate_pfn - 1));
 		}
 
+		sort_folios_by_order(&cc->migratepages);
+
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
 				compaction_free, (unsigned long)cc, cc->mode,
 				MR_COMPACTION, &nr_succeeded);
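
The hunks above use struct page_list, init_page_list(), and
release_free_list(), which are introduced earlier in this series
("Enable >0 order folio memory compaction") and are not shown in this
excerpt. A minimal sketch of the two definitions this patch leans on,
with field names inferred from their usage in sort_folios_by_order()
and therefore possibly differing from the actual series, would be:

struct page_list {
	struct list_head pages;		/* folios linked via folio->lru */
	unsigned long nr_pages;		/* number of folios on the list */
};

static void init_page_list(struct page_list *list)
{
	INIT_LIST_HEAD(&list->pages);
	list->nr_pages = 0;
}

Since folio orders are bounded by MAX_ORDER, bucketing plus the
highest-order-first splice amounts to a counting sort: O(n) in the
number of folios, stable within each order, and cheap enough to run
once per migration batch.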