[3/6] mm: memory: rename page_copy_prealloc() to folio_prealloc()

Message ID 20231107135216.415926-4-wangkefeng.wang@huawei.com
State New
Series mm: cleanup and use more folio in page fault

Commit Message

Kefeng Wang Nov. 7, 2023, 1:52 p.m. UTC
  Let's rename page_copy_prealloc() to folio_prealloc() so that it
can be reused by more functions. Since some callers may need the new
folio zeroed, add a new need_zero argument and call
vma_alloc_zeroed_movable_folio() when need_zero is true.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
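
With the new argument in place, a caller in mm/memory.c that wants the
preallocated folio zeroed could do something like the sketch below. The
fault-handling context and error value are illustrative only, not part
of this patch; passing false keeps the existing copy_pte_range()
behaviour:

	struct folio *folio;

	/* Preallocate a zeroed, movable anonymous folio for vmf->address. */
	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, true);
	if (!folio)
		return VM_FAULT_OOM;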
  

Comments

Sidhartha Kumar Nov. 7, 2023, 6:21 p.m. UTC | #1
On 11/7/23 5:52 AM, Kefeng Wang wrote:
> Let's rename page_copy_prealloc() to folio_prealloc() so that it
> can be reused by more functions. Since some callers may need the new
> folio zeroed, add a new need_zero argument and call
> vma_alloc_zeroed_movable_folio() when need_zero is true.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>   mm/memory.c | 13 +++++++++----
>   1 file changed, 9 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index b1bff4d245da..062136d25da3 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -988,12 +988,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>   	return 0;
>   }
>   
> -static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
> -		struct vm_area_struct *vma, unsigned long addr)
> +static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
> +		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
>   {
>   	struct folio *new_folio;
>   
> -	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
> +	if (need_zero)
> +		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
> +	else
> +		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
> +					    addr, false);
> +
>   	if (!new_folio)
>   		return NULL;
>   
> @@ -1125,7 +1130,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>   	} else if (ret == -EBUSY) {
>   		goto out;
>   	} else if (ret ==  -EAGAIN) {
> -		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
> +		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
>   		if (!prealloc)
>   			return -ENOMEM;
>   	} else if (ret) {
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
  

Patch

diff --git a/mm/memory.c b/mm/memory.c
index b1bff4d245da..062136d25da3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -988,12 +988,17 @@  copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
-		struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
 {
 	struct folio *new_folio;
 
-	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (need_zero)
+		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+	else
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+					    addr, false);
+
 	if (!new_folio)
 		return NULL;
 
@@ -1125,7 +1130,7 @@  copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	} else if (ret == -EBUSY) {
 		goto out;
 	} else if (ret ==  -EAGAIN) {
-		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
 		if (!prealloc)
 			return -ENOMEM;
 	} else if (ret) {
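
For context on the need_zero path: on architectures without their own
zeroed movable allocation helper, the generic
vma_alloc_zeroed_movable_folio() in include/linux/highmem.h reduces to
roughly the following, i.e. the same GFP_HIGHUSER_MOVABLE allocation
plus a user-highpage clear (paraphrased; the exact helper may differ
per architecture and kernel version):

static inline struct folio *
vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct folio *folio;

	/* Same allocation as the need_zero == false path... */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	/* ...followed by clearing the page through a user mapping. */
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}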