[08/31] mm/page_vma_mapped: pte_offset_map_nolock() not pte_lockptr()

Message ID 8fa3fb6e-2e39-cbea-c529-ee9e64c7d2d0@google.com
State New
Series mm: allow pte_offset_map[_lock]() to fail

Commit Message

Hugh Dickins May 22, 2023, 4:58 a.m. UTC
map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging
to pte, even if pmd entry is then changed racily: page_vma_mapped_walk()
use that instead of getting pte_lockptr() later, or restart if map_pte()
found no page table.

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)
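
To make the locking rule concrete outside of diff context, here is a
minimal, runnable userspace model of the race being closed.  All names
in it (slot, struct table, map_nolock) are invented for illustration;
this is not kernel code.  It contrasts the old two-read pattern,
pte_offset_map() now and pte_lockptr() later, with the single-read
pattern of pte_offset_map_nolock(), which is what guarantees that the
returned ptl belongs to the same page table that the pte points into.

/*
 * Minimal userspace model of the race this patch closes: hypothetical
 * names throughout, not kernel code.  "slot" stands in for the pmd
 * entry, which a concurrent thread may repoint at a different page
 * table; each "struct table" carries its own lock, like a split ptl.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct table {
	pthread_mutex_t lock;		/* stands in for the table's ptl */
	int pte[8];			/* stands in for its ptes */
};

static _Atomic(struct table *) slot;	/* stands in for *pvmw->pmd */

/* Old pattern: pte_offset_map() now, pte_lockptr() later - two reads. */
static int *map_then_lockptr(pthread_mutex_t **ptlp)
{
	int *pte = atomic_load(&slot)->pte;	/* first read of "pmd" */

	/* if *slot is repointed here, we pair pte with the wrong lock */
	*ptlp = &atomic_load(&slot)->lock;	/* second, independent read */
	return pte;
}

/* New pattern, like pte_offset_map_nolock(): one read yields both. */
static int *map_nolock(pthread_mutex_t **ptlp)
{
	struct table *t = atomic_load(&slot);	/* single read of "pmd" */

	*ptlp = &t->lock;	/* ptl of the very table pte points into */
	return t->pte;
}

int main(void)
{
	static struct table a = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_mutex_t *ptl;
	int *pte;

	atomic_store(&slot, &a);

	pte = map_then_lockptr(&ptl);	/* safe only while slot is stable */
	pte = map_nolock(&ptl);		/* safe even if slot changes after */
	pthread_mutex_lock(ptl);	/* lock matches pte's table */
	*pte = 1;
	pthread_mutex_unlock(ptl);
	printf("pte[0] = %d\n", a.pte[0]);
	return 0;
}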
  

Comments

Qi Zheng May 22, 2023, 11:41 a.m. UTC | #1
On 2023/5/22 12:58, Hugh Dickins wrote:
> map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging
> to pte, even if pmd entry is then changed racily: page_vma_mapped_walk()
> use that instead of getting pte_lockptr() later, or restart if map_pte()
> found no page table.
> 
> Signed-off-by: Hugh Dickins <hughd@google.com>
> ---
>   mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------
>   1 file changed, 22 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 947dc7491815..2af734274073 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
>   	return false;
>   }
>   
> -static bool map_pte(struct page_vma_mapped_walk *pvmw)
> +static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
>   {
>   	if (pvmw->flags & PVMW_SYNC) {
>   		/* Use the stricter lookup */
>   		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
>   						pvmw->address, &pvmw->ptl);
> -		return true;
> +		*ptlp = pvmw->ptl;
> +		return !!pvmw->pte;
>   	}
>   
> -	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
> +	/*
> +	 * It is important to return the ptl corresponding to pte,
> +	 * in case *pvmw->pmd changes underneath us; so we need to
> +	 * return it even when choosing not to lock, in case caller
> +	 * proceeds to loop over next ptes, and finds a match later.
> +	 * Though, in most cases, page lock already protects this.
> +	 */
> +	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
> +					  pvmw->address, ptlp);
> +	if (!pvmw->pte)
> +		return false;
> +
>   	if (pvmw->flags & PVMW_MIGRATION) {
>   		if (!is_swap_pte(*pvmw->pte))
>   			return false;
> @@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
>   	} else if (!pte_present(*pvmw->pte)) {
>   		return false;
>   	}
> -	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
> +	pvmw->ptl = *ptlp;
>   	spin_lock(pvmw->ptl);
>   	return true;
>   }
> @@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>   	struct vm_area_struct *vma = pvmw->vma;
>   	struct mm_struct *mm = vma->vm_mm;
>   	unsigned long end;
> +	spinlock_t *ptl;
>   	pgd_t *pgd;
>   	p4d_t *p4d;
>   	pud_t *pud;
> @@ -257,8 +270,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>   			step_forward(pvmw, PMD_SIZE);
>   			continue;
>   		}
> -		if (!map_pte(pvmw))
> +		if (!map_pte(pvmw, &ptl)) {
> +			if (!pvmw->pte)
> +				goto restart;

Could pvmw->pmd be changed? Otherwise, how about just jumping to the
retry label below?

@@ -205,6 +205,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
+
+retry:
                /*
                 * Make sure the pmd value isn't cached in a register by the
                 * compiler and used as a stale value after we've observed a

>   			goto next_pte;
> +		}
>   this_pte:
>   		if (check_pte(pvmw))
>   			return true;
> @@ -281,7 +297,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>   		} while (pte_none(*pvmw->pte));
>   
>   		if (!pvmw->ptl) {
> -			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
> +			pvmw->ptl = ptl;
>   			spin_lock(pvmw->ptl);
>   		}
>   		goto this_pte;
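
Qi's question above comes down to how far back to jump when map_pte()
finds no page table: the existing restart label redoes the
pgd/p4d/pud/pmd lookups, while the proposed retry label would re-read
only the pmd.  A toy, runnable model of the two label placements
(invented names, not the kernel function) may help:

/*
 * Toy model of the two goto targets (hypothetical, not
 * mm/page_vma_mapped.c): "restart" repeats the cheap upper-level
 * lookups; the proposed "retry" would skip them.  Both are correct
 * when the upper levels cannot change under us.
 */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

/* stands in for map_pte() returning false with pvmw->pte == NULL */
static bool map_lower_table(void)
{
	return ++attempts >= 2;		/* "vanish" once, then succeed */
}

static bool walk(void)
{
	int upper;
restart:
	upper = 1;		/* stands in for the pgd/p4d/pud/pmd
				 * lookups, redone on every restart */
/* retry:			   the alternative label would go here */
	if (!upper)
		return false;
	if (!map_lower_table())
		goto restart;	/* rare: page table freed under us */
	return true;
}

int main(void)
{
	printf("%s after %d attempt(s)\n",
	       walk() ? "mapped" : "not mapped", attempts);
	return 0;
}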
  
Hugh Dickins May 24, 2023, 2:44 a.m. UTC | #2
On Mon, 22 May 2023, Qi Zheng wrote:
> On 2023/5/22 12:58, Hugh Dickins wrote:
> > map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging
> > to pte, even if pmd entry is then changed racily: page_vma_mapped_walk()
> > use that instead of getting pte_lockptr() later, or restart if map_pte()
> > found no page table.
> > 
> > Signed-off-by: Hugh Dickins <hughd@google.com>
> > ---
> >   mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------
> >   1 file changed, 22 insertions(+), 6 deletions(-)
> > 
> > diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> > index 947dc7491815..2af734274073 100644
> > --- a/mm/page_vma_mapped.c
> > +++ b/mm/page_vma_mapped.c
> > @@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> >   	struct vm_area_struct *vma = pvmw->vma;
> >   	struct mm_struct *mm = vma->vm_mm;
> >   	unsigned long end;
> > +	spinlock_t *ptl;
> >   	pgd_t *pgd;
> >   	p4d_t *p4d;
> >   	pud_t *pud;
> > @@ -257,8 +270,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> >   			step_forward(pvmw, PMD_SIZE);
> >   			continue;
> >   		}
> > -		if (!map_pte(pvmw))
> > +		if (!map_pte(pvmw, &ptl)) {
> > +			if (!pvmw->pte)
> > +				goto restart;
> 
> Could pvmw->pmd be changed? Otherwise, how about just jumping to the
> retry label below?
> 
> @@ -205,6 +205,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>                 }
> 
>                 pvmw->pmd = pmd_offset(pud, pvmw->address);
> +
> +retry:
>                 /*
>                  * Make sure the pmd value isn't cached in a register by the
>                  * compiler and used as a stale value after we've observed a

You're right, that could be done, and that's where I'd have inserted
the label if there were none already.  I just thought the fewer goto
labels the better, so reused the restart already there.  If you feel
strongly that it's actively misleading, I can certainly make that
change; but it's too rare an occurrence to be worth optimizing for.

Hugh
  

Patch

diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 947dc7491815..2af734274073 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 	return false;
 }
 
-static bool map_pte(struct page_vma_mapped_walk *pvmw)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 {
 	if (pvmw->flags & PVMW_SYNC) {
 		/* Use the stricter lookup */
 		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
 						pvmw->address, &pvmw->ptl);
-		return true;
+		*ptlp = pvmw->ptl;
+		return !!pvmw->pte;
 	}
 
-	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
+	/*
+	 * It is important to return the ptl corresponding to pte,
+	 * in case *pvmw->pmd changes underneath us; so we need to
+	 * return it even when choosing not to lock, in case caller
+	 * proceeds to loop over next ptes, and finds a match later.
+	 * Though, in most cases, page lock already protects this.
+	 */
+	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+					  pvmw->address, ptlp);
+	if (!pvmw->pte)
+		return false;
+
 	if (pvmw->flags & PVMW_MIGRATION) {
 		if (!is_swap_pte(*pvmw->pte))
 			return false;
@@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	} else if (!pte_present(*pvmw->pte)) {
 		return false;
 	}
-	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
+	pvmw->ptl = *ptlp;
 	spin_lock(pvmw->ptl);
 	return true;
 }
@@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	struct vm_area_struct *vma = pvmw->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long end;
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -257,8 +270,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
-		if (!map_pte(pvmw))
+		if (!map_pte(pvmw, &ptl)) {
+			if (!pvmw->pte)
+				goto restart;
 			goto next_pte;
+		}
 this_pte:
 		if (check_pte(pvmw))
 			return true;
@@ -281,7 +297,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		} while (pte_none(*pvmw->pte));
 
 		if (!pvmw->ptl) {
-			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+			pvmw->ptl = ptl;
 			spin_lock(pvmw->ptl);
 		}
 		goto this_pte;