[RFC,v1,4/4] hugetlb: parallelize hugetlb page allocation

Message ID: 20231123133036.68540-5-gang.li@linux.dev
State: New
Series: hugetlb: parallelize hugetlb page allocation on boot

Commit Message

Gang Li Nov. 23, 2023, 1:30 p.m. UTC
  From: Gang Li <ligang.bdlg@bytedance.com>

By distributing the allocation across per-node worker threads, systems
configured with a large number of hugetlb pages can allocate them
faster, improving boot speed.

Signed-off-by: Gang Li <ligang.bdlg@bytedance.com>
---
 mm/hugetlb.c | 89 +++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 74 insertions(+), 15 deletions(-)
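
Not part of the patch: below is a minimal, self-contained userspace
sketch (C11 + pthreads) of the fan-out/fan-in pattern the patch builds
with queue_work_node(), an atomic remaining-work counter, and a
completion. NR_NODES, TOTAL_PAGES, and run_worker() are illustrative
stand-ins, not kernel API; the real code splits h->max_huge_pages
evenly across the memory nodes, gives the remainder to the first node,
and lets the last worker to finish wake the waiter.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_NODES	4	/* stand-in for num_node_state(N_MEMORY) */
#define TOTAL_PAGES	1027	/* stand-in for h->max_huge_pages */

struct work {
	int node;
	int num;
};

static atomic_int n_undone;	/* mirrors hugetlb_hstate_alloc_n_undone */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;

static void *run_worker(void *arg)
{
	struct work *w = arg;

	/* Stand-in for the alloc_pool_huge_folio() loop. */
	printf("node %d allocates %d pages\n", w->node, w->num);

	/* Last worker out wakes the waiter, like complete() in the patch. */
	if (atomic_fetch_sub(&n_undone, 1) == 1) {
		pthread_mutex_lock(&done_lock);
		pthread_cond_signal(&done_cond);
		pthread_mutex_unlock(&done_lock);
	}
	return NULL;
}

int main(void)
{
	struct work works[NR_NODES];
	pthread_t threads[NR_NODES];
	int i;

	atomic_store(&n_undone, NR_NODES);
	pthread_mutex_lock(&done_lock);
	for (i = 0; i < NR_NODES; i++) {
		works[i].node = i;
		/* Even split; the first worker also takes the remainder. */
		works[i].num = TOTAL_PAGES / NR_NODES;
		if (i == 0)
			works[i].num += TOTAL_PAGES % NR_NODES;
		pthread_create(&threads[i], NULL, run_worker, &works[i]);
	}
	/* Equivalent of wait_for_completion(). */
	while (atomic_load(&n_undone) > 0)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	for (i = 0; i < NR_NODES; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

Build with "cc -pthread sketch.c". The same last-one-out pairing of
atomic_dec_and_test() and complete() appears twice in the patch: once
for the allocation pass and once for the per-node vmemmap optimization
pass.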
  

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac8558724cc2..df3fbe95989e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3509,6 +3509,55 @@  static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
 	}
 }
 
+struct hugetlb_work {
+	struct work_struct work;
+	struct hstate *h;
+	int num;
+	int nid;
+};
+
+static atomic_t hugetlb_hstate_alloc_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(hugetlb_hstate_alloc_comp);
+
+static void __init hugetlb_alloc_node(struct work_struct *w)
+{
+	struct hugetlb_work *hw = container_of(w, struct hugetlb_work, work);
+	struct hstate *h = hw->h;
+	int i, num = hw->num;
+	nodemask_t node_alloc_noretry;
+	unsigned long flags;
+
+	/* Bit mask controlling how hard we retry per-node allocations. */
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < num; ++i) {
+		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+						&node_alloc_noretry);
+		if (!folio)
+			break;
+		spin_lock_irqsave(&hugetlb_lock, flags);
+		__prep_account_new_huge_page(h, folio_nid(folio));
+		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
+		cond_resched();
+	}
+
+	if (atomic_dec_and_test(&hugetlb_hstate_alloc_n_undone))
+		complete(&hugetlb_hstate_alloc_comp);
+}
+
+static void __init hugetlb_vmemmap_optimize_node(struct work_struct *w)
+{
+	struct hugetlb_work *hw = container_of(w, struct hugetlb_work, work);
+	struct hstate *h = hw->h;
+	int nid = hw->nid;
+
+	hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
+
+	if (atomic_dec_and_test(&hugetlb_hstate_alloc_n_undone))
+		complete(&hugetlb_hstate_alloc_comp);
+}
+
 static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
 {
 	unsigned long i;
@@ -3528,26 +3577,36 @@  static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
 
 static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
 {
-	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t node_alloc_noretry;
+	int nid;
+	struct hugetlb_work *works;
 
-	/* Bit mask controlling how hard we retry per-node allocations.*/
-	nodes_clear(node_alloc_noretry);
+	works = kcalloc(nr_node_ids, sizeof(*works), GFP_KERNEL);
+	if (!works) {
+		pr_warn("HugeTLB: allocating struct hugetlb_work failed.\n");
+		return 0;
+	}
 
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-						&node_alloc_noretry);
-		if (!folio)
-			break;
-		list_add(&folio->lru, &folio_list);
-		cond_resched();
+	atomic_set(&hugetlb_hstate_alloc_n_undone, num_node_state(N_MEMORY));
+	for_each_node_state(nid, N_MEMORY) {
+		works[nid].h = h;
+		works[nid].num = h->max_huge_pages / num_node_state(N_MEMORY);
+		if (nid == first_node(node_states[N_MEMORY]))
+			works[nid].num += h->max_huge_pages % num_node_state(N_MEMORY);
+		INIT_WORK(&works[nid].work, hugetlb_alloc_node);
+		queue_work_node(nid, system_unbound_wq, &works[nid].work);
 	}
+	wait_for_completion(&hugetlb_hstate_alloc_comp);
 
-	prep_and_add_allocated_folios(h, &folio_list);
+	atomic_set(&hugetlb_hstate_alloc_n_undone, num_node_state(N_MEMORY));
+	for_each_node_state(nid, N_MEMORY) {
+		works[nid].nid = nid;
+		INIT_WORK(&works[nid].work, hugetlb_vmemmap_optimize_node);
+		queue_work_node(nid, system_unbound_wq, &works[nid].work);
+	}
+	wait_for_completion(&hugetlb_hstate_alloc_comp);
 
-	return i;
+	kfree(works);
+	return h->nr_huge_pages;
 }
 
 /*