Hello Hugh,
On Sun, 21 May 2023 22:00:15 -0700 (PDT) Hugh Dickins <hughd@google.com> wrote:
> Simple walk_page_range() users should set ACTION_AGAIN to retry when
> pte_offset_map_lock() fails.
>
> No need to check pmd_trans_unstable(): that was precisely to avoid the
> possibility of calling pte_offset_map() on a racily removed or inserted
> THP entry, but such cases are now safely handled inside it. Likewise
> there is no need to check pmd_none() or pmd_bad() before calling it.
>
> Signed-off-by: Hugh Dickins <hughd@google.com>
For the mm/damon part below,
Reviewed-by: SeongJae Park <sj@kernel.org>
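
For reference, the retry idiom being added to each walker looks roughly
like below.  This is only an illustrative sketch under names of my own
choosing, not code from the patch: example_pmd_entry() and its per-pte
work are hypothetical.

/* Sketch of a simple walk_page_range() user's ->pmd_entry callback
 * (assumes the usual <linux/pagewalk.h> / <linux/mm.h> context). */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * pte_offset_map_lock() returns NULL when the expected page
	 * table has been removed or replaced (e.g. by a THP), so no
	 * pmd_none()/pmd_bad()/pmd_trans_unstable() pre-checks are
	 * needed before calling it.
	 */
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		/* ask the pagewalk core to retry this address range */
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		/* per-pte work would go here */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

ACTION_AGAIN makes the pagewalk core re-evaluate the same range, so the
walker just drops out and is called again once the page table settles.
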
Thanks,
SJ
> ---
> fs/proc/task_mmu.c | 32 ++++++++++++++++----------------
> mm/damon/vaddr.c   | 12 ++++++++----
> mm/mempolicy.c     |  7 ++++---
> mm/mincore.c       |  9 ++++-----
> mm/mlock.c         |  4 ++++
> 5 files changed, 36 insertions(+), 28 deletions(-)
>
[...]
> diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
> index 1fec16d7263e..b8762ff15c3c 100644
> --- a/mm/damon/vaddr.c
> +++ b/mm/damon/vaddr.c
> @@ -318,9 +318,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
> spin_unlock(ptl);
> }
>
> - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
> - return 0;
> pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> if (!pte_present(*pte))
> goto out;
> damon_ptep_mkold(pte, walk->mm, addr);
> @@ -464,9 +466,11 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
> regular_page:
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>
> - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
> - return -EINVAL;
> pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> if (!pte_present(*pte))
> goto out;
> folio = damon_get_folio(pte_pfn(*pte));
[...]
> @@ -631,14 +631,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> goto out;
> }
> - if (pmd_trans_unstable(pmd))
> - goto out;
> - /*
> - * The mmap_lock held all the way back in m_start() is what
> - * keeps khugepaged out of here and from collapsing things
> - * in here.
> - */
> pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> for (; addr != end; pte++, addr += PAGE_SIZE)
> smaps_pte_entry(pte, addr, walk);
> pte_unmap_unlock(pte - 1, ptl);
> @@ -1191,10 +1188,11 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
> return 0;
> }
> - if (pmd_trans_unstable(pmd))
> - return 0;
> -
> pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> for (; addr != end; pte++, addr += PAGE_SIZE) {
> ptent = *pte;
> @@ -1538,9 +1536,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
> spin_unlock(ptl);
> return err;
> }
> -
> - if (pmd_trans_unstable(pmdp))
> - return 0;
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> /*
> @@ -1548,6 +1543,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
> * goes beyond vma->vm_end.
> */
> orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return err;
> + }
> for (; addr < end; pte++, addr += PAGE_SIZE) {
> pagemap_entry_t pme;
> @@ -1887,11 +1886,12 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
> spin_unlock(ptl);
> return 0;
> }
> -
> - if (pmd_trans_unstable(pmd))
> - return 0;
> #endif
> orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> do {
> struct page *page = can_gather_numa_stats(*pte, vma, addr);
> if (!page)
> @@ -514,10 +514,11 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
> if (ptl)
> return queue_folios_pmd(pmd, ptl, addr, end, walk);
> - if (pmd_trans_unstable(pmd))
> - return 0;
> -
> mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> for (; addr != end; pte++, addr += PAGE_SIZE) {
> if (!pte_present(*pte))
> continue;
> @@ -113,12 +113,11 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> goto out;
> }
> - if (pmd_trans_unstable(pmd)) {
> - __mincore_unmapped_range(addr, end, vma, vec);
> - goto out;
> - }
> -
> ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!ptep) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> for (; addr != end; ptep++, addr += PAGE_SIZE) {
> pte_t pte = *ptep;
> @@ -329,6 +329,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
> }
> start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> + if (!start_pte) {
> + walk->action = ACTION_AGAIN;
> + return 0;
> + }
> for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
> if (!pte_present(*pte))
> continue;