[1/2] mm: optimize page allocation when CMA is enabled
From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
Let us look at the series of scenarios below, with WMARK_LOW=25MB and
WMARK_MIN=5MB (managed pages 1.9GB). The current 'fixed 1/2 ratio' policy
only starts using CMA at scenario C, by which point the free UNMOVABLE &
RECLAIMABLE (U&R) pages have already fallen below WMARK_LOW. This goes
against the current memory policy, namely that U&R should either stay
around WMARK_LOW when there is no allocation pressure, or be replenished
by reclaim via entering the slowpath.
-- Free_pages
|
|
-- WMARK_LOW
|
-- Free_CMA
|
|
--
Free_CMA/Free_pages(MB)      A(12/30)   B(12/25)   C(12/20)
fixed 1/2 ratio (use CMA)       N          N          Y
this commit (use CMA)           Y          Y          Y
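
To make the numbers concrete, here is a worked example for scenarios A
and C (assuming the watermark being checked is WMARK_LOW=25MB, as above):

Scenario A (Free_CMA=12MB, Free_pages=30MB):
  fixed 1/2 ratio: 12 > 30/2 = 15 is false, so CMA is not used first;
  this commit:     30 - 12 = 18MB < WMARK_LOW, i.e. the watermark check
                   fails without free CMA pages, so CMA is used first.

Scenario C (Free_CMA=12MB, Free_pages=20MB):
  fixed 1/2 ratio: 12 > 20/2 = 10 is true, so CMA is finally used first,
                   but U&R (20 - 12 = 8MB) has already fallen far below
                   WMARK_LOW.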
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
v2: do proportion check when zone_watermark_ok, update commit message
v3: update coding style and simplify the logic when zone_watermark_ok
---
mm/page_alloc.c | 46 ++++++++++++++++++++++++++++++++++++++++------
1 file changed, 40 insertions(+), 6 deletions(-)
@@ -3071,6 +3071,41 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
}
+#ifdef CONFIG_CMA
+/*
+ * A __GFP_MOVABLE allocation can drain UNMOVABLE & RECLAIMABLE page blocks
+ * via the help of CMA, later making GFP_KERNEL allocations fail. Recheck
+ * zone_watermark_ok() without ALLOC_CMA to decide whether to use CMA first.
+ */
+static bool __if_use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+ unsigned long watermark;
+ bool cma_first = false;
+
+ watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+	/*
+	 * Check whether the previous watermark check passed only via the
+	 * help of CMA: if the check fails without ALLOC_CMA, UNMOVABLE &
+	 * RECLAIMABLE pages are not enough now, so use CMA first to keep
+	 * them around the corresponding watermark.
+	 */
+	if (!zone_watermark_ok(zone, order, watermark, 0, alloc_flags & ~ALLOC_CMA))
+		cma_first = true;
+	else
+		/*
+		 * Keep the previous fixed 1/2 logic when the watermark is
+		 * OK, as we have the protection above.
+		 */
+		cma_first = zone_page_state(zone, NR_FREE_CMA_PAGES) >
+			    zone_page_state(zone, NR_FREE_PAGES) / 2;
+ return cma_first;
+}
+#else
+static bool __if_use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+ return false;
+}
+#endif
/*
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
@@ -3084,13 +3119,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
if (IS_ENABLED(CONFIG_CMA)) {
/*
* Balance movable allocations between regular and CMA areas by
- * allocating from CMA when over half of the zone's free memory
- * is in the CMA area.
+		 * allocating from CMA first when the watermark check would
+		 * fail without the help of free CMA pages.
*/
- if (alloc_flags & ALLOC_CMA &&
- zone_page_state(zone, NR_FREE_CMA_PAGES) >
- zone_page_state(zone, NR_FREE_PAGES) / 2) {
- page = __rmqueue_cma_fallback(zone, order);
+		if (alloc_flags & ALLOC_CMA && migratetype == MIGRATE_MOVABLE &&
+		    __if_use_cma_first(zone, order, alloc_flags)) {
+			page = __rmqueue_cma_fallback(zone, order);
if (page)
return page;
}