[v2,9/9] mm/hugetlb: convert move_hugetlb_state() to folios

Message ID: 20221101223059.460937-10-sidhartha.kumar@oracle.com
State: New
Series: convert hugetlb_cgroup helper functions to folios

Commit Message

Sidhartha Kumar Nov. 1, 2022, 10:30 p.m. UTC
  Clean up unmap_and_move_huge_page() by converting move_hugetlb_state() to
take in folios.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb.h |  6 +++---
 mm/hugetlb.c            | 22 ++++++++++++----------
 mm/migrate.c            |  4 ++--
 3 files changed, 17 insertions(+), 15 deletions(-)
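
For orientation: the net effect at the call site is that unmap_and_move_huge_page() now passes the folios it already tracks instead of raw struct page pointers, and the page_folio() conversions that previously happened inside move_hugetlb_state() (e.g. for hugetlb_cgroup_migrate()) go away. Below is a minimal sketch of the new call shape, assuming the folios are derived with page_folio() in the caller as elsewhere in this series; the wrapper function and its struct page parameters are illustrative only, while the move_hugetlb_state() signature matches the include/linux/hugetlb.h hunk.

/*
 * Sketch only -- condensed from the hunks below, not a literal excerpt.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

static void hugetlb_migrate_state_example(struct page *hpage,
					  struct page *new_hpage, int reason)
{
	/* Folio views of the old and new hugetlb pages. */
	struct folio *src = page_folio(hpage);
	struct folio *dst = page_folio(new_hpage);

	/*
	 * The helper now works on folios directly: page_hstate(),
	 * page_to_nid() and the HPageTemporary()/SetHPageTemporary()/
	 * ClearHPageTemporary() calls inside it become folio_hstate(),
	 * folio_nid() and the folio_*_hugetlb_temporary() accessors.
	 */
	move_hugetlb_state(src, dst, reason);
}

The mm/migrate.c hunk makes the matching caller-side change: hugetlb_page_subpool(hpage) becomes hugetlb_folio_subpool(src), and the folios are handed to move_hugetlb_state() directly.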
  

Comments

Muchun Song Nov. 2, 2022, 7:01 a.m. UTC | #1
> On Nov 2, 2022, at 06:30, Sidhartha Kumar <sidhartha.kumar@oracle.com> wrote:
> 
> Clean up unmap_and_move_huge_page() by converting move_hugetlb_state() to
> take in folios.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

A nit below.

> ---
> include/linux/hugetlb.h |  6 +++---
> mm/hugetlb.c            | 22 ++++++++++++----------
> mm/migrate.c            |  4 ++--
> 3 files changed, 17 insertions(+), 15 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index d81f139193aa..375cd57721d6 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -184,7 +184,7 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
> int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
> bool *migratable_cleared);
> void putback_active_hugepage(struct page *page);
> -void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
> +void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
> void free_huge_page(struct page *page);
> void hugetlb_fix_reserve_counts(struct inode *inode);
> extern struct mutex *hugetlb_fault_mutex_table;
> @@ -440,8 +440,8 @@ static inline void putback_active_hugepage(struct page *page)
> {
> }
> 
> -static inline void move_hugetlb_state(struct page *oldpage,
> - struct page *newpage, int reason)
> +static inline void move_hugetlb_state(struct folio *old_folio,
> + struct folio *new_folio, int reason)
> {
> }
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 2ecc0a6cf883..2ab8f3b7132a 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -7289,15 +7289,15 @@ void putback_active_hugepage(struct page *page)
> put_page(page);
> }
> 
> -void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
> +void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
> {
> - struct hstate *h = page_hstate(oldpage);
> + struct hstate *h = folio_hstate(old_folio);
> 
> - hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
> - set_page_owner_migrate_reason(newpage, reason);
> + hugetlb_cgroup_migrate(old_folio, new_folio);
> + set_page_owner_migrate_reason(&new_folio->page, reason);
> 
> /*
> - * transfer temporary state of the new huge page. This is
> + * transfer temporary state of the new hugetlb folio. This is
> * reverse to other transitions because the newpage is going to
> * be final while the old one will be freed so it takes over
> * the temporary status.
> @@ -7306,12 +7306,14 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
> * here as well otherwise the global surplus count will not match
> * the per-node's.
> */
> - if (HPageTemporary(newpage)) {
> - int old_nid = page_to_nid(oldpage);
> - int new_nid = page_to_nid(newpage);
> + if (folio_test_hugetlb_temporary(new_folio)) {
> + int old_nid = folio_nid(old_folio);
> + int new_nid = folio_nid(new_folio);
> +
> +

Please remove this redundant blank line.

Thanks.

> + folio_set_hugetlb_temporary(old_folio);
> + folio_clear_hugetlb_temporary(new_folio);
> 
> - SetHPageTemporary(oldpage);
> - ClearHPageTemporary(newpage);
> 
> /*
> * There is no need to transfer the per-node surplus state
> diff --git a/mm/migrate.c b/mm/migrate.c
> index d7db4fd97d8e..81f9a36c754d 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1278,7 +1278,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
> * folio_mapping() set, hugetlbfs specific move page routine will not
> * be called and we could leak usage counts for subpools.
> */
> - if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
> + if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
> rc = -EBUSY;
> goto out_unlock;
> }
> @@ -1328,7 +1328,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
> put_anon_vma(anon_vma);
> 
> if (rc == MIGRATEPAGE_SUCCESS) {
> - move_hugetlb_state(hpage, new_hpage, reason);
> + move_hugetlb_state(src, dst, reason);
> put_new_page = NULL;
> }
> 
> -- 
> 2.31.1
> 
>
  

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d81f139193aa..375cd57721d6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -184,7 +184,7 @@  int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 				bool *migratable_cleared);
 void putback_active_hugepage(struct page *page);
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
@@ -440,8 +440,8 @@  static inline void putback_active_hugepage(struct page *page)
 {
 }
 
-static inline void move_hugetlb_state(struct page *oldpage,
-					struct page *newpage, int reason)
+static inline void move_hugetlb_state(struct folio *old_folio,
+					struct folio *new_folio, int reason)
 {
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2ecc0a6cf883..2ab8f3b7132a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7289,15 +7289,15 @@  void putback_active_hugepage(struct page *page)
 	put_page(page);
 }
 
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
 {
-	struct hstate *h = page_hstate(oldpage);
+	struct hstate *h = folio_hstate(old_folio);
 
-	hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
-	set_page_owner_migrate_reason(newpage, reason);
+	hugetlb_cgroup_migrate(old_folio, new_folio);
+	set_page_owner_migrate_reason(&new_folio->page, reason);
 
 	/*
-	 * transfer temporary state of the new huge page. This is
+	 * transfer temporary state of the new hugetlb folio. This is
 	 * reverse to other transitions because the newpage is going to
 	 * be final while the old one will be freed so it takes over
 	 * the temporary status.
@@ -7306,12 +7306,14 @@  void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 	 * here as well otherwise the global surplus count will not match
 	 * the per-node's.
 	 */
-	if (HPageTemporary(newpage)) {
-		int old_nid = page_to_nid(oldpage);
-		int new_nid = page_to_nid(newpage);
+	if (folio_test_hugetlb_temporary(new_folio)) {
+		int old_nid = folio_nid(old_folio);
+		int new_nid = folio_nid(new_folio);
+
+
+		folio_set_hugetlb_temporary(old_folio);
+		folio_clear_hugetlb_temporary(new_folio);
 
-		SetHPageTemporary(oldpage);
-		ClearHPageTemporary(newpage);
 
 		/*
 		 * There is no need to transfer the per-node surplus state
diff --git a/mm/migrate.c b/mm/migrate.c
index d7db4fd97d8e..81f9a36c754d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1278,7 +1278,7 @@  static int unmap_and_move_huge_page(new_page_t get_new_page,
 	 * folio_mapping() set, hugetlbfs specific move page routine will not
 	 * be called and we could leak usage counts for subpools.
 	 */
-	if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
+	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
 		rc = -EBUSY;
 		goto out_unlock;
 	}
@@ -1328,7 +1328,7 @@  static int unmap_and_move_huge_page(new_page_t get_new_page,
 		put_anon_vma(anon_vma);
 
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		move_hugetlb_state(hpage, new_hpage, reason);
+		move_hugetlb_state(src, dst, reason);
 		put_new_page = NULL;
 	}