On 22 Jan 2024, at 22:46, Zi Yan wrote:
> From: Zi Yan <ziy@nvidia.com>
>
> Before the last commit, memory compaction only migrated order-0 folios
> and skipped >0 order folios. The last commit splits all >0 order folios
> during compaction. This commit migrates >0 order folios during compaction
> by keeping isolated free pages at their original size, without splitting
> them into order-0 pages, and using them directly during the migration
> process.
>
> What is different from the prior implementation:
> 1. All isolated free pages are kept in an NR_PAGE_ORDERS-sized array of
> page lists, where each page list stores free pages of the same order.
> 2. Isolated free pages are neither post_alloc_hook() processed nor buddy
> pages, although their orders are stored in the first page's private
> field, just like buddy pages.
> 3. During migration, at new page allocation time (i.e., in
> compaction_alloc()), free pages are processed by post_alloc_hook().
> When migration fails and a new page is returned (i.e., in
> compaction_free()), free pages are restored by reversing the
> post_alloc_hook() operations using the newly added
> free_pages_prepare_fpi_none().
>
> Step 3 is done so that a later optimization, splitting and/or merging
> free pages during compaction, becomes easier.
>
> Note: without splitting free pages, compaction can end prematurely,
> because migration will return -ENOMEM even when free pages are
> available. This happens when no order-0 free page exists and
> compaction_alloc() returns NULL.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> mm/compaction.c | 148 +++++++++++++++++++++++++++++-------------------
> mm/internal.h | 9 ++-
> mm/page_alloc.c | 6 ++
> 3 files changed, 103 insertions(+), 60 deletions(-)
>
<snip>
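As a reading aid, here is a minimal sketch of the bookkeeping the commit
message describes. struct page_list matches the patch; take_free_page()
and put_back_free_page() are made-up names, condensed stand-ins for the
real compaction_alloc()/compaction_free() hunks quoted further below,
not actual kernel code:

/*
 * Free pages sit on the per-order lists "raw": not
 * post_alloc_hook() processed and not buddy pages.
 */
struct page_list {
	struct list_head pages;
	unsigned long nr_pages;
};

/* Condensed analogue of compaction_alloc() */
static struct page *take_free_page(struct page_list *freepages, int order)
{
	struct page *page;

	if (!freepages[order].nr_pages)
		return NULL;	/* migrate_pages() then sees -ENOMEM */

	page = list_first_entry(&freepages[order].pages, struct page, lru);
	list_del(&page->lru);
	freepages[order].nr_pages--;

	/* Step 3: prepare the page only at allocation time. */
	post_alloc_hook(page, order, __GFP_MOVABLE);
	return page;
}

/* Condensed analogue of compaction_free() */
static void put_back_free_page(struct page_list *freepages,
			       struct page *page, int order)
{
	/*
	 * Step 3, reversed: undo post_alloc_hook() so the page can go
	 * back on the raw per-order list. (The real compaction_free()
	 * also clears the refcount first.)
	 */
	free_pages_prepare_fpi_none(page, order);
	list_add(&page->lru, &freepages[order].pages);
	freepages[order].nr_pages++;
}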
> @@ -1462,7 +1489,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn)
> if (!page)
> return;
>
> - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
> + isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
>
> /* Skip this pageblock in the future as it's full or nearly full */
> if (start_pfn == end_pfn && !cc->no_set_skip_hint)
> @@ -1591,7 +1618,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
> nr_scanned += nr_isolated - 1;
> total_isolated += nr_isolated;
> cc->nr_freepages += nr_isolated;
> - list_add_tail(&page->lru, &cc->freepages);
> + list_add_tail(&page->lru, &cc->freepages[order].pages);
I did not increment nr_pages here, so compaction_alloc() thought no free
pages had been isolated.
This is the fix:
diff --git a/mm/compaction.c b/mm/compaction.c
index 335a6f6787e4..fa9993c8a389 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1638,6 +1638,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
total_isolated += nr_isolated;
cc->nr_freepages += nr_isolated;
list_add_tail(&page->lru, &cc->freepages[order].pages);
+ cc->freepages[order].nr_pages++;
count_compact_events(COMPACTISOLATED, nr_isolated);
} else {
/* If isolation fails, abort the search */
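To spell out why the missing increment matters: compaction_alloc() in
this series keys off the per-order counter, not list_empty(), so a page
added with list_add_tail() but without the matching nr_pages increment is
invisible to the allocator. From the compaction_alloc() hunk:

	if (!cc->freepages[order].nr_pages) {
		isolate_freepages(cc);
		if (!cc->freepages[order].nr_pages)
			return NULL;	/* -> migrate_pages() gets -ENOMEM */
	}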
I will send out v3 once I rerun vm-scalability and thpcompact.
--
Best Regards,
Yan, Zi
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -66,45 +66,67 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
-static unsigned long release_freepages(struct list_head *freelist)
+static void init_page_list(struct page_list *p)
{
- struct page *page, *next;
- unsigned long high_pfn = 0;
-
- list_for_each_entry_safe(page, next, freelist, lru) {
- unsigned long pfn = page_to_pfn(page);
- list_del(&page->lru);
- __free_page(page);
- if (pfn > high_pfn)
- high_pfn = pfn;
- }
-
- return high_pfn;
+ INIT_LIST_HEAD(&p->pages);
+ p->nr_pages = 0;
}
-static void split_map_pages(struct list_head *list)
+static void split_map_pages(struct page_list *freepages)
{
- unsigned int i, order, nr_pages;
+ unsigned int i, order, total_nr_pages;
struct page *page, *next;
LIST_HEAD(tmp_list);
- list_for_each_entry_safe(page, next, list, lru) {
- list_del(&page->lru);
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ total_nr_pages = freepages[order].nr_pages * (1 << order);
+ freepages[order].nr_pages = 0;
+
+ list_for_each_entry_safe(page, next, &freepages[order].pages, lru) {
+ unsigned int nr_pages;
+
+ list_del(&page->lru);
- order = page_private(page);
- nr_pages = 1 << order;
+ nr_pages = 1 << order;
- post_alloc_hook(page, order, __GFP_MOVABLE);
- if (order)
- split_page(page, order);
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (order)
+ split_page(page, order);
- for (i = 0; i < nr_pages; i++) {
- list_add(&page->lru, &tmp_list);
- page++;
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&page->lru, &tmp_list);
+ page++;
+ }
}
+ freepages[0].nr_pages += total_nr_pages;
+ list_splice_init(&tmp_list, &freepages[0].pages);
}
+}
- list_splice(&tmp_list, list);
+static unsigned long release_free_list(struct page_list *freepages)
+{
+ int order;
+ unsigned long high_pfn = 0;
+
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, &freepages[order].pages, lru) {
+ unsigned long pfn = page_to_pfn(page);
+
+ list_del(&page->lru);
+ /*
+ * Convert free pages into post allocation pages, so
+ * that we can free them via __free_pages().
+ */
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ __free_pages(page, order);
+ if (pfn > high_pfn)
+ high_pfn = pfn;
+ }
+ freepages[order].nr_pages = 0;
+ }
+ return high_pfn;
}
#ifdef CONFIG_COMPACTION
@@ -583,7 +605,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long *start_pfn,
unsigned long end_pfn,
- struct list_head *freelist,
+ struct page_list *freelist,
unsigned int stride,
bool strict)
{
@@ -657,7 +679,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned += isolated - 1;
total_isolated += isolated;
cc->nr_freepages += isolated;
- list_add_tail(&page->lru, freelist);
+ list_add_tail(&page->lru, &freelist[order].pages);
+ freelist[order].nr_pages++;
if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
@@ -722,7 +745,11 @@ isolate_freepages_range(struct compact_control *cc,
unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
- LIST_HEAD(freelist);
+ int order;
+ struct page_list tmp_freepages[NR_PAGE_ORDERS];
+
+ for (order = 0; order < NR_PAGE_ORDERS; order++)
+ init_page_list(&tmp_freepages[order]);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
@@ -753,7 +780,7 @@ isolate_freepages_range(struct compact_control *cc,
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, &freelist, 0, true);
+ block_end_pfn, tmp_freepages, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
@@ -770,15 +797,15 @@ isolate_freepages_range(struct compact_control *cc,
*/
}
- /* __isolate_free_page() does not map the pages */
- split_map_pages(&freelist);
-
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
- release_freepages(&freelist);
+ release_free_list(tmp_freepages);
return 0;
}
+ /* __isolate_free_page() does not map the pages */
+ split_map_pages(tmp_freepages);
+
/* We don't use freelists for anything. */
return pfn;
}
@@ -1462,7 +1489,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn)
if (!page)
return;
- isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+ isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
/* Skip this pageblock in the future as it's full or nearly full */
if (start_pfn == end_pfn && !cc->no_set_skip_hint)
@@ -1591,7 +1618,7 @@ static void fast_isolate_freepages(struct compact_control *cc)
nr_scanned += nr_isolated - 1;
total_isolated += nr_isolated;
cc->nr_freepages += nr_isolated;
- list_add_tail(&page->lru, &cc->freepages);
+ list_add_tail(&page->lru, &cc->freepages[order].pages);
count_compact_events(COMPACTISOLATED, nr_isolated);
} else {
/* If isolation fails, abort the search */
@@ -1668,13 +1695,12 @@ static void isolate_freepages(struct compact_control *cc)
unsigned long isolate_start_pfn; /* exact pfn we start at */
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
- struct list_head *freelist = &cc->freepages;
unsigned int stride;
/* Try a small search of the free lists for a candidate */
fast_isolate_freepages(cc);
if (cc->nr_freepages)
- goto splitmap;
+ return;
/*
* Initialise the free scanner. The starting point is where we last
@@ -1734,7 +1760,7 @@ static void isolate_freepages(struct compact_control *cc)
/* Found a block suitable for isolating free pages from. */
nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, stride, false);
+ block_end_pfn, cc->freepages, stride, false);
/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn == block_end_pfn)
@@ -1775,10 +1801,6 @@ static void isolate_freepages(struct compact_control *cc)
* and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
-
-splitmap:
- /* __isolate_free_page() does not map the pages */
- split_map_pages(freelist);
}
/*
@@ -1789,23 +1811,22 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
struct folio *dst;
+ int order = folio_order(src);
- /* this makes migrate_pages() split the source page and retry */
- if (folio_test_large(src) > 0)
- return NULL;
-
- if (list_empty(&cc->freepages)) {
+ if (!cc->freepages[order].nr_pages) {
isolate_freepages(cc);
-
- if (list_empty(&cc->freepages))
+ if (!cc->freepages[order].nr_pages)
return NULL;
}
- dst = list_entry(cc->freepages.next, struct folio, lru);
+ dst = list_first_entry(&cc->freepages[order].pages, struct folio, lru);
+ cc->freepages[order].nr_pages--;
list_del(&dst->lru);
- cc->nr_freepages--;
-
- return dst;
+ post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+ if (order)
+ prep_compound_page(&dst->page, order);
+ cc->nr_freepages -= 1 << order;
+ return page_rmappable_folio(&dst->page);
}
/*
@@ -1816,9 +1837,17 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data)
static void compaction_free(struct folio *dst, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
+ int order = folio_order(dst);
+ struct page *page = &dst->page;
+
+ folio_set_count(dst, 0);
+ free_pages_prepare_fpi_none(page, order);
- list_add(&dst->lru, &cc->freepages);
- cc->nr_freepages++;
+ INIT_LIST_HEAD(&dst->lru);
+
+ list_add(&dst->lru, &cc->freepages[order].pages);
+ cc->freepages[order].nr_pages++;
+ cc->nr_freepages += 1 << order;
}
/* possible outcome of isolate_migratepages */
@@ -2442,6 +2471,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
unsigned int nr_succeeded = 0;
+ int order;
/*
* These counters track activities during zone compaction. Initialize
@@ -2451,7 +2481,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->total_free_scanned = 0;
cc->nr_migratepages = 0;
cc->nr_freepages = 0;
- INIT_LIST_HEAD(&cc->freepages);
+ for (order = 0; order < NR_PAGE_ORDERS; order++)
+ init_page_list(&cc->freepages[order]);
INIT_LIST_HEAD(&cc->migratepages);
cc->migratetype = gfp_migratetype(cc->gfp_mask);
@@ -2637,7 +2668,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
* so we don't leave any returned pages behind in the next attempt.
*/
if (cc->nr_freepages > 0) {
- unsigned long free_pfn = release_freepages(&cc->freepages);
+ unsigned long free_pfn = release_free_list(cc->freepages);
cc->nr_freepages = 0;
VM_BUG_ON(free_pfn == 0);
@@ -2656,7 +2687,6 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
- VM_BUG_ON(!list_empty(&cc->freepages));
VM_BUG_ON(!list_empty(&cc->migratepages));
return ret;
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -447,6 +447,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
+extern bool free_pages_prepare_fpi_none(struct page *page, unsigned int order);
+
extern int user_min_free_kbytes;
extern void free_unref_page(struct page *page, unsigned int order);
@@ -473,6 +475,11 @@ int split_free_page(struct page *free_page,
/*
* in mm/compaction.c
*/
+
+struct page_list {
+ struct list_head pages;
+ unsigned long nr_pages;
+};
/*
* compact_control is used to track pages being migrated and the free pages
* they are being migrated to during memory compaction. The free_pfn starts
@@ -481,7 +488,7 @@ int split_free_page(struct page *free_page,
* completes when free_pfn <= migrate_pfn
*/
struct compact_control {
- struct list_head freepages; /* List of free pages to migrate to */
+ struct page_list freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
struct list_head migratepages; /* List of pages being migrated */
unsigned int nr_freepages; /* Number of isolated free pages */
unsigned int nr_migratepages; /* Number of pages to migrate */
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1179,6 +1179,12 @@ static __always_inline bool free_pages_prepare(struct page *page,
return true;
}
+__always_inline bool free_pages_prepare_fpi_none(struct page *page,
+ unsigned int order)
+{
+ return free_pages_prepare(page, order, FPI_NONE);
+}
+
/*
* Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone.