[v4,4/7] mm: page_owner: add support for splitting to any order in split page_owner.
Commit Message
From: Zi Yan <ziy@nvidia.com>
Add a new_order parameter to set the new page order in page_owner.
This prepares for the upcoming changes to support splitting a huge page
to any lower order.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/linux/page_owner.h | 10 +++++-----
mm/huge_memory.c | 2 +-
mm/page_alloc.c | 4 ++--
mm/page_owner.c | 9 +++++----
4 files changed, 13 insertions(+), 12 deletions(-)
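The new calling convention, briefly: the callers converted in this patch still split all the way down to base pages, so they pass 0 as new_order; the follow-up split-to-any-order work can pass a non-zero target order instead. A minimal sketch (the non-zero-order call and the concrete order values are assumptions about the upcoming patches, not something in this patch):

	/* Callers in this patch: split an order-`order` page to order-0 pages. */
	split_page_owner(head, order, 0);

	/*
	 * Assumed future caller: split e.g. an order-9 THP into order-2 chunks,
	 * so page_owner records order 2 for the resulting pages.
	 */
	split_page_owner(head, 9, 2);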
Comments
On 13.02.24 22:55, Zi Yan wrote:
> From: Zi Yan <ziy@nvidia.com>
>
> Add a new_order parameter to set the new page order in page_owner.
> This prepares for the upcoming changes to support splitting a huge page
> to any lower order.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---
> include/linux/page_owner.h | 10 +++++-----
> mm/huge_memory.c | 2 +-
> mm/page_alloc.c | 4 ++--
> mm/page_owner.c | 9 +++++----
> 4 files changed, 13 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
> index d7878523adfc..a784ba69f67f 100644
> --- a/include/linux/page_owner.h
> +++ b/include/linux/page_owner.h
> @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
> extern void __reset_page_owner(struct page *page, unsigned short order);
> extern void __set_page_owner(struct page *page,
> unsigned short order, gfp_t gfp_mask);
> -extern void __split_page_owner(struct page *page, int order);
> +extern void __split_page_owner(struct page *page, int old_order, int new_order);
> extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
> extern void __set_page_owner_migrate_reason(struct page *page, int reason);
> extern void __dump_page_owner(const struct page *page);
> @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
> __set_page_owner(page, order, gfp_mask);
> }
>
> -static inline void split_page_owner(struct page *page, int order)
> +static inline void split_page_owner(struct page *page, int old_order, int new_order)
> {
> if (static_branch_unlikely(&page_owner_inited))
> - __split_page_owner(page, order);
> + __split_page_owner(page, old_order, new_order);
> }
> static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
> {
> @@ -56,11 +56,11 @@ static inline void reset_page_owner(struct page *page, unsigned short order)
> {
> }
> static inline void set_page_owner(struct page *page,
> - unsigned int order, gfp_t gfp_mask)
> + unsigned short order, gfp_t gfp_mask)
> {
> }
> static inline void split_page_owner(struct page *page,
> - int order)
> + int old_order, int new_order)
> {
> }
> static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 3d30eccd3a7f..ad7133c97428 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2919,7 +2919,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> unlock_page_lruvec(lruvec);
> /* Caller disabled irqs, so they are still disabled here */
>
> - split_page_owner(head, order);
> + split_page_owner(head, order, 0);
>
> /* See comment in __split_huge_page_tail() */
> if (PageAnon(head)) {
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 9d4dd41d0647..e0f107b21c98 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2652,7 +2652,7 @@ void split_page(struct page *page, unsigned int order)
>
> for (i = 1; i < (1 << order); i++)
> set_page_refcounted(page + i);
> - split_page_owner(page, order);
> + split_page_owner(page, order, 0);
> split_page_memcg(page, order, 0);
> }
> EXPORT_SYMBOL_GPL(split_page);
> @@ -4837,7 +4837,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
> struct page *page = virt_to_page((void *)addr);
> struct page *last = page + nr;
>
> - split_page_owner(page, order);
> + split_page_owner(page, order, 0);
> split_page_memcg(page, order, 0);
> while (page < --last)
> set_page_refcounted(last);
> diff --git a/mm/page_owner.c b/mm/page_owner.c
> index 1319e402c2cf..ebbffa0501db 100644
> --- a/mm/page_owner.c
> +++ b/mm/page_owner.c
> @@ -292,19 +292,20 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
> page_ext_put(page_ext);
> }
>
> -void __split_page_owner(struct page *page, int order)
> +void __split_page_owner(struct page *page, int old_order, int new_order)
> {
> int i;
> struct page_ext *page_ext = page_ext_get(page);
> struct page_owner *page_owner;
> - unsigned int nr = 1 << order;
> + unsigned int old_nr = 1 << old_order;
> + unsigned int new_nr = 1 << new_order;
>
> if (unlikely(!page_ext))
> return;
>
> - for (i = 0; i < nr; i++) {
> + for (i = 0; i < old_nr; i += new_nr) {
> page_owner = get_page_owner(page_ext);
> - page_owner->order = 0;
> + page_owner->order = new_order;
> page_ext = page_ext_next(page_ext);
Staring at __set_page_owner_handle(), we do set all 1<<order page_exts
(corresponding to 1<<order "struct page"s) to have ->order set.
Wouldn't you have to do the same here?
for (i = 0; i < 1 << old_order; i++) {
page_owner = get_page_owner(page_ext);
page_owner->order = new_order;
page_ext = page_ext_next(page_ext);
}
On 14 Feb 2024, at 4:34, David Hildenbrand wrote:
> On 13.02.24 22:55, Zi Yan wrote:
>> From: Zi Yan <ziy@nvidia.com>
>>
>> Add a new_order parameter to set the new page order in page_owner.
>> This prepares for the upcoming changes to support splitting a huge page
>> to any lower order.
>>
>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>> ---
>> include/linux/page_owner.h | 10 +++++-----
>> mm/huge_memory.c | 2 +-
>> mm/page_alloc.c | 4 ++--
>> mm/page_owner.c | 9 +++++----
>> 4 files changed, 13 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
>> index d7878523adfc..a784ba69f67f 100644
>> --- a/include/linux/page_owner.h
>> +++ b/include/linux/page_owner.h
>> @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
>> extern void __reset_page_owner(struct page *page, unsigned short order);
>> extern void __set_page_owner(struct page *page,
>> unsigned short order, gfp_t gfp_mask);
>> -extern void __split_page_owner(struct page *page, int order);
>> +extern void __split_page_owner(struct page *page, int old_order, int new_order);
>> extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
>> extern void __set_page_owner_migrate_reason(struct page *page, int reason);
>> extern void __dump_page_owner(const struct page *page);
>> @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
>> __set_page_owner(page, order, gfp_mask);
>> }
>> -static inline void split_page_owner(struct page *page, int order)
>> +static inline void split_page_owner(struct page *page, int old_order, int new_order)
>> {
>> if (static_branch_unlikely(&page_owner_inited))
>> - __split_page_owner(page, order);
>> + __split_page_owner(page, old_order, new_order);
>> }
>> static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
>> {
>> @@ -56,11 +56,11 @@ static inline void reset_page_owner(struct page *page, unsigned short order)
>> {
>> }
>> static inline void set_page_owner(struct page *page,
>> - unsigned int order, gfp_t gfp_mask)
>> + unsigned short order, gfp_t gfp_mask)
>> {
>> }
>> static inline void split_page_owner(struct page *page,
>> - int order)
>> + int old_order, int new_order)
>> {
>> }
>> static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index 3d30eccd3a7f..ad7133c97428 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -2919,7 +2919,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>> unlock_page_lruvec(lruvec);
>> /* Caller disabled irqs, so they are still disabled here */
>> - split_page_owner(head, order);
>> + split_page_owner(head, order, 0);
>> /* See comment in __split_huge_page_tail() */
>> if (PageAnon(head)) {
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 9d4dd41d0647..e0f107b21c98 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -2652,7 +2652,7 @@ void split_page(struct page *page, unsigned int order)
>> for (i = 1; i < (1 << order); i++)
>> set_page_refcounted(page + i);
>> - split_page_owner(page, order);
>> + split_page_owner(page, order, 0);
>> split_page_memcg(page, order, 0);
>> }
>> EXPORT_SYMBOL_GPL(split_page);
>> @@ -4837,7 +4837,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
>> struct page *page = virt_to_page((void *)addr);
>> struct page *last = page + nr;
>> - split_page_owner(page, order);
>> + split_page_owner(page, order, 0);
>> split_page_memcg(page, order, 0);
>> while (page < --last)
>> set_page_refcounted(last);
>> diff --git a/mm/page_owner.c b/mm/page_owner.c
>> index 1319e402c2cf..ebbffa0501db 100644
>> --- a/mm/page_owner.c
>> +++ b/mm/page_owner.c
>> @@ -292,19 +292,20 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
>> page_ext_put(page_ext);
>> }
>> -void __split_page_owner(struct page *page, int order)
>> +void __split_page_owner(struct page *page, int old_order, int new_order)
>> {
>> int i;
>> struct page_ext *page_ext = page_ext_get(page);
>> struct page_owner *page_owner;
>> - unsigned int nr = 1 << order;
>> + unsigned int old_nr = 1 << old_order;
>> + unsigned int new_nr = 1 << new_order;
>> if (unlikely(!page_ext))
>> return;
>> - for (i = 0; i < nr; i++) {
>> + for (i = 0; i < old_nr; i += new_nr) {
>> page_owner = get_page_owner(page_ext);
>> - page_owner->order = 0;
>> + page_owner->order = new_order;
>> page_ext = page_ext_next(page_ext);
>
> Staring at __set_page_owner_handle(), we do set all 1<<order page_exts (corresponding to 1<<order "struct page"s) to have ->order set.
>
> Wouldn't you have to do the same here?
>
> for (i = 0; i < 1 << old_order; i++) {
> page_owner = get_page_owner(page_ext);
> page_owner->order = new_order;
> page_ext = page_ext_next(page_ext);
> }
You are right. page_owner is tracked per struct page; I misunderstood the code.
Thank you for pointing this out. Will fix it.
--
Best Regards,
Yan, Zi
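Folding David's suggested loop back into __split_page_owner() gives roughly the shape below: every one of the 1 << old_order page_ext entries gets new_order recorded, since page_owner is tracked per struct page. This is a sketch of the expected fix, not code posted in this thread:

void __split_page_owner(struct page *page, int old_order, int new_order)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	/* page_owner is per struct page: update every subpage's entry. */
	for (i = 0; i < (1 << old_order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}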