[v2,4/6] mm: memory: use a folio in do_cow_page()

Message ID 20231113152222.3495908-5-wangkefeng.wang@huawei.com
State New
Series mm: cleanup and use more folio in page fault

Commit Message

Kefeng Wang Nov. 13, 2023, 3:22 p.m. UTC
  Use the folio_prealloc() helper and convert to use a folio in
do_cow_page(), which saves five compound_head() calls.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)
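
For context, folio_prealloc() (added earlier in this series) bundles the
folio allocation, memcg charge and swaprate throttling that do_cow_fault()
used to open-code. A sketch of its shape, reconstructed from the calls this
patch deletes rather than copied verbatim from the kernel tree:

	static struct folio *folio_prealloc(struct mm_struct *src_mm,
			struct vm_area_struct *vma, unsigned long addr,
			bool need_zero)
	{
		struct folio *folio;

		/* need_zero selects a pre-zeroed folio; do_cow_fault()
		 * passes false since copy_user_highpage() overwrites the
		 * contents anyway. */
		if (need_zero)
			folio = vma_alloc_zeroed_movable_folio(vma, addr);
		else
			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
						vma, addr, false);
		if (!folio)
			return NULL;

		/* Charge to the memcg up front, exactly as the deleted
		 * open-coded block in do_cow_fault() did. */
		if (mem_cgroup_charge(folio, src_mm, GFP_KERNEL)) {
			folio_put(folio);
			return NULL;
		}
		folio_throttle_swaprate(folio, GFP_KERNEL);
		return folio;
	}

With the helper, the caller's VM_FAULT_OOM handling collapses to a single
NULL check, and the folio is already charged by the time vmf->cow_page is
set.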
  

Comments

Vishal Moola Nov. 13, 2023, 7:51 p.m. UTC | #1
On Mon, Nov 13, 2023 at 11:22:20PM +0800, Kefeng Wang wrote:
> Use the folio_prealloc() helper and convert to use a folio in
> do_cow_page(), which saves five compound_head() calls.

s/do_cow_page()/do_cow_fault()/

Aside from that,
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/memory.c | 16 ++++++----------
>  1 file changed, 6 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index d85df1c59f52..f350ab2a324f 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4653,6 +4653,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
>  static vm_fault_t do_cow_fault(struct vm_fault *vmf)
>  {
>  	struct vm_area_struct *vma = vmf->vma;
> +	struct folio *folio;
>  	vm_fault_t ret;
>  
>  	ret = vmf_can_call_fault(vmf);
> @@ -4661,16 +4662,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
>  	if (ret)
>  		return ret;
>  
> -	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
> -	if (!vmf->cow_page)
> +	folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
> +	if (!folio)
>  		return VM_FAULT_OOM;
>  
> -	if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
> -				GFP_KERNEL)) {
> -		put_page(vmf->cow_page);
> -		return VM_FAULT_OOM;
> -	}
> -	folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
> +	vmf->cow_page = &folio->page;
>  
>  	ret = __do_fault(vmf);
>  	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
> @@ -4679,7 +4675,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
>  		return ret;
>  
>  	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
> -	__SetPageUptodate(vmf->cow_page);
> +	__folio_mark_uptodate(folio);
>  
>  	ret |= finish_fault(vmf);
>  	unlock_page(vmf->page);
> @@ -4688,7 +4684,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
>  		goto uncharge_out;
>  	return ret;
>  uncharge_out:
> -	put_page(vmf->cow_page);
> +	folio_put(folio);
>  	return ret;
>  }
>  
> -- 
> 2.27.0
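
A note on the "five compound_head() calls": in the old code they hide in
the two explicit page_folio() uses, the two put_page() calls and
__SetPageUptodate(), each of which resolves the owning folio from the
page. A simplified illustration of the pattern, condensed from
include/linux/mm.h and not the verbatim kernel definition:

	static inline void put_page(struct page *page)
	{
		/* page_folio() is a compound_head() lookup under the
		 * hood; calling folio_put() on a folio you already
		 * hold skips it. */
		struct folio *folio = page_folio(page);

		folio_put(folio);
	}

Since do_cow_fault() now holds the folio from allocation through release,
none of those lookups is needed.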
  
