[v5,2/2] mm: Init page count in reserve_bootmem_region when MEMINIT_EARLY

Message ID 20230929170026.2520216-3-yajun.deng@linux.dev
State New
Series mm: Don't init and clear page count when MEMINIT_EARLY

Commit Message

Yajun Deng Sept. 29, 2023, 5 p.m. UTC
  memmap_init_range() initializes the page count of all pages, but the
page count of free pages is reset again in __free_pages_core(). These
are opposite operations, and doing both is unnecessary and
time-consuming in the MEMINIT_EARLY context.
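
For illustration, a condensed view of the pre-patch flow (paraphrased
from mm/mm_init.c and mm/page_alloc.c, not verbatim):

	/* memmap_init_range() -> __init_single_page(): every page,
	 * free or reserved, starts with page count 1.
	 */
	__init_single_page(page, pfn, zone, nid, INIT_PAGE_COUNT);

	/* __free_pages_core(): free pages immediately undo that. */
	for (loop = 0; loop < nr_pages; loop++, p++) {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}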

Instead, initialize the page count in reserve_bootmem_region() when in
the MEMINIT_EARLY context, and check the page count before resetting it
in __free_pages_core().
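
Condensed, the resulting flow looks like this (mirroring the diff
below):

	/* memmap_init_range(): only MEMINIT_HOTPLUG pages get their
	 * page count (and PG_reserved) here.
	 */
	if (context == MEMINIT_HOTPLUG)
		flags = INIT_PAGE_COUNT | INIT_PAGE_RESERVED;
	__init_single_page(page, pfn, zone, nid, flags);

	/* reserve_bootmem_region(): in the MEMINIT_EARLY context,
	 * reserved pages get their page count here instead.
	 */
	init_page_count(page);

	/* __free_pages_core(): only reset page counts that were set. */
	if (page_count(page)) {
		/* ... loop over all nr_pages pages ... */
		set_page_count(p, 0);
	}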

At the same time, the INIT_LIST_HEAD in reserve_bootmem_region() isn't
needed, as it is already done in __init_single_page().
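
For reference, an abridged sketch of __init_single_page() with the
flags argument added by patch 1/2 of this series (the exact placement
of the flag handling is illustrative; see mm/mm_init.c for the full
function):

	static void __meminit __init_single_page(struct page *page,
				unsigned long pfn, unsigned long zone,
				int nid, enum page_init_flags flags)
	{
		mm_zero_struct_page(page);
		set_page_links(page, zone, nid, pfn);
		page_mapcount_reset(page);
		page_cpupid_reset_last(page);
		page_kasan_tag_reset(page);

		/* already initializes the lru list head */
		INIT_LIST_HEAD(&page->lru);

		if (flags & INIT_PAGE_COUNT)
			init_page_count(page);
		if (flags & INIT_PAGE_RESERVED)
			__SetPageReserved(page);
	}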

The following timings were measured on an x86 machine with 190GB of RAM.

before:
free_low_memory_core_early()    341ms

after:
free_low_memory_core_early()    285ms

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
v5: add a flags variable in memmap_init_range().
v4: same as v2.
v3: same as v2.
v2: check the page count instead of checking the context before resetting it.
v1: https://lore.kernel.org/all/20230922070923.355656-1-yajun.deng@linux.dev/
---
 mm/mm_init.c    | 20 +++++++++++++++-----
 mm/page_alloc.c | 20 ++++++++++++--------
 2 files changed, 27 insertions(+), 13 deletions(-)

Patch

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 0549e7c3d588..f84f1ede57c6 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -718,7 +718,7 @@  static void __meminit init_reserved_page(unsigned long pfn, int nid)
 		if (zone_spans_pfn(zone, pfn))
 			break;
 	}
-	__init_single_page(pfn_to_page(pfn), pfn, zid, nid, INIT_PAGE_COUNT);
+	__init_single_page(pfn_to_page(pfn), pfn, zid, nid, 0);
 }
 #else
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
@@ -756,8 +756,11 @@  void __meminit reserve_bootmem_region(phys_addr_t start,
 
 			init_reserved_page(start_pfn, nid);
 
-			/* Avoid false-positive PageTail() */
-			INIT_LIST_HEAD(&page->lru);
+			/*
+			 * The page count isn't initialized in memmap_init_range()
+			 * in the MEMINIT_EARLY context, so initialize it here.
+			 */
+			init_page_count(page);
 
 			/*
 			 * no need for atomic set_bit because the struct
@@ -850,6 +853,7 @@  void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 		struct vmem_altmap *altmap, int migratetype)
 {
 	unsigned long pfn, end_pfn = start_pfn + size;
+	enum page_init_flags flags = 0;
 	struct page *page;
 
 	if (highest_memmap_pfn < end_pfn - 1)
@@ -888,9 +892,15 @@  void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
 		}
 
 		page = pfn_to_page(pfn);
-		__init_single_page(page, pfn, zone, nid, INIT_PAGE_COUNT);
+
+		/* In the MEMINIT_EARLY context, the page count of reserved
+		 * pages is initialized in reserve_bootmem_region() instead;
+		 * free pages keep a zero page count, which is checked in
+		 * __free_pages_core() before resetting it.
+		 */
 		if (context == MEMINIT_HOTPLUG)
-			__SetPageReserved(page);
+			flags = INIT_PAGE_COUNT | INIT_PAGE_RESERVED;
+		__init_single_page(page, pfn, zone, nid, flags);
 
 		/*
 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7df77b58a961..bc68b5452d01 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1289,18 +1289,22 @@  void __free_pages_core(struct page *page, unsigned int order)
 	unsigned int loop;
 
 	/*
-	 * When initializing the memmap, __init_single_page() sets the refcount
-	 * of all pages to 1 ("allocated"/"not free"). We have to set the
-	 * refcount of all involved pages to 0.
+	 * When initializing the memmap in the hotplug context,
+	 * memmap_init_range() sets the refcount of all pages ("reserved"
+	 * and "free") to 1, so it must be reset to 0 here. In the early
+	 * context, only reserve_bootmem_region() sets the refcount, and
+	 * only on reserved pages, so pages with a zero refcount are skipped.
 	 */
-	prefetchw(p);
-	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
-		prefetchw(p + 1);
+	if (page_count(page)) {
+		prefetchw(p);
+		for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+			prefetchw(p + 1);
+			__ClearPageReserved(p);
+			set_page_count(p, 0);
+		}
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 	}
-	__ClearPageReserved(p);
-	set_page_count(p, 0);
 
 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);