[v2,3/5] hugetlb: Pass struct vm_fault through to hugetlb_handle_userfault()

Message ID 20240221234732.187629-4-vishal.moola@gmail.com
State: New
Series: Handle hugetlb faults under the VMA lock

Commit Message

Vishal Moola Feb. 21, 2024, 11:47 p.m. UTC
  Now that hugetlb_fault() has a struct vm_fault, have
hugetlb_handle_userfault() use it instead of creating one of its own.

This lets us reduce the number of arguments passed to
hugetlb_handle_userfault() from 7 to 3, cleaning up the code and stack.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/hugetlb.c | 38 +++++++++-----------------------------
 1 file changed, 9 insertions(+), 29 deletions(-)
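
With this change, hugetlb_handle_userfault() reads the VMA, page offset, address and
flags out of the vm_fault that hugetlb_fault() already populates. For reference, an
abridged and flattened sketch of the relevant struct vm_fault fields from
include/linux/mm.h (field list trimmed for illustration; the exact layout and const
grouping differ between kernel versions):

	struct vm_fault {
		struct vm_area_struct *vma;	/* Target VMA */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
		enum fault_flag flags;		/* FAULT_FLAG_xxx flags */
		/* ... pmd/pud pointers, orig_pte, page, pte, ptl, ... */
	};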
  

Comments

Matthew Wilcox Feb. 22, 2024, 3:41 a.m. UTC | #1
On Wed, Feb 21, 2024 at 03:47:30PM -0800, Vishal Moola (Oracle) wrote:
> Now that hugetlb_fault() has a struct vm_fault, have
> hugetlb_handle_userfault() use it instead of creating one of its own.
> 
> This lets us reduce the number of arguments passed to
> hugetlb_handle_userfault() from 7 to 3, cleaning up the code and stack.
> 
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---
>  mm/hugetlb.c | 38 +++++++++-----------------------------
>  1 file changed, 9 insertions(+), 29 deletions(-)

I love the look of this ...

> @@ -6116,7 +6098,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>  			struct vm_area_struct *vma,
>  			struct address_space *mapping, pgoff_t idx,
>  			unsigned long address, pte_t *ptep,
> -			pte_t old_pte, unsigned int flags)
> +			pte_t old_pte, unsigned int flags,
> +			struct vm_fault *vmf)

Should we remove vma, address, idx and flags?
  
Vishal Moola Feb. 22, 2024, 4:13 p.m. UTC | #2
On Wed, Feb 21, 2024 at 7:41 PM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Wed, Feb 21, 2024 at 03:47:30PM -0800, Vishal Moola (Oracle) wrote:
> > Now that hugetlb_fault() has a struct vm_fault, have
> > hugetlb_handle_userfault() use it instead of creating one of its own.
> >
> > This lets us reduce the number of arguments passed to
> > hugetlb_handle_userfault() from 7 to 3, cleaning up the code and stack.
> >
> > Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> > ---
> >  mm/hugetlb.c | 38 +++++++++-----------------------------
> >  1 file changed, 9 insertions(+), 29 deletions(-)
>
> I love the look of this ...
>
> > @@ -6116,7 +6098,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
> >                       struct vm_area_struct *vma,
> >                       struct address_space *mapping, pgoff_t idx,
> >                       unsigned long address, pte_t *ptep,
> > -                     pte_t old_pte, unsigned int flags)
> > +                     pte_t old_pte, unsigned int flags,
> > +                     struct vm_fault *vmf)
>
> Should we remove vma, address, idx and flags?


Yes, I'm going to do that in another patchset; this one is mainly about
enabling hugetlb_fault() to work safely under the VMA lock. Doing the
argument removal separately will also make it easier to debug if any
substitution goes wrong somewhere.

We may also be able to remove one (or more) of the pte_t arguments,
but I have to look into that more.
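
For illustration only (not part of this series): if vma, idx, address and flags were
later read from the vm_fault inside hugetlb_no_page(), its prototype could
hypothetically shrink to something like:

	/* Hypothetical follow-up shape, not what this patch does: callers would
	 * pass only what cannot be derived from the vm_fault itself. */
	static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct address_space *mapping, pte_t *ptep,
			pte_t old_pte, struct vm_fault *vmf);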
  

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d792d60ea16c..70c5870e859e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6060,39 +6060,21 @@  int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping
 	return 0;
 }
 
-static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
+static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
 						  struct address_space *mapping,
-						  pgoff_t idx,
-						  unsigned int flags,
-						  unsigned long haddr,
-						  unsigned long addr,
 						  unsigned long reason)
 {
 	u32 hash;
-	struct vm_fault vmf = {
-		.vma = vma,
-		.address = haddr,
-		.real_address = addr,
-		.flags = flags,
-
-		/*
-		 * Hard to debug if it ends up being
-		 * used by a callee that assumes
-		 * something about the other
-		 * uninitialized fields... same as in
-		 * memory.c
-		 */
-	};
 
 	/*
 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
 	 * userfault. Also mmap_lock could be dropped due to handling
 	 * userfault, any vma operation should be careful from here.
 	 */
-	hugetlb_vma_unlock_read(vma);
-	hash = hugetlb_fault_mutex_hash(mapping, idx);
+	hugetlb_vma_unlock_read(vmf->vma);
+	hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-	return handle_userfault(&vmf, reason);
+	return handle_userfault(vmf, reason);
 }
 
 /*
@@ -6116,7 +6098,8 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			struct vm_area_struct *vma,
 			struct address_space *mapping, pgoff_t idx,
 			unsigned long address, pte_t *ptep,
-			pte_t old_pte, unsigned int flags)
+			pte_t old_pte, unsigned int flags,
+			struct vm_fault *vmf)
 {
 	struct hstate *h = hstate_vma(vma);
 	vm_fault_t ret = VM_FAULT_SIGBUS;
@@ -6175,8 +6158,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 				goto out;
 			}
 
-			return hugetlb_handle_userfault(vma, mapping, idx, flags,
-							haddr, address,
+			return hugetlb_handle_userfault(vmf, mapping,
 							VM_UFFD_MISSING);
 		}
 
@@ -6248,8 +6230,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 				ret = 0;
 				goto out;
 			}
-			return hugetlb_handle_userfault(vma, mapping, idx, flags,
-							haddr, address,
+			return hugetlb_handle_userfault(vmf, mapping,
 							VM_UFFD_MINOR);
 		}
 	}
@@ -6419,9 +6400,8 @@  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * hugetlb_no_page will drop vma lock and hugetlb fault
 		 * mutex internally, which make us return immediately.
 		 */
-
 		return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
-					ptep, entry, flags);
+					ptep, entry, flags, &vmf);
 	}
 
 	ret = 0;