[v2,31/32] mm/swap: swap_vma_readahead() do the pte_offset_map()
Commit Message
swap_vma_readahead() has been proceeding in an unconventional way, its
preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(),
then relying on that pte pointer even after the pte_unmap() - in its
CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit
copied ptes to stack while they were mapped, but had to limit how many).

Though it would be difficult to construct a failing testcase, accessing
page table after pte_unmap() will become bad practice, even on 64-bit:
an rcu_read_unlock() in pte_unmap() will allow page table to be freed.
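
(Purely illustrative, not from the patch: the shape of the access
pattern being removed, once pte_unmap() ends an RCU read-side critical
section; "pmd" and "addr" stand in for the readahead pmd and address.)

	pte_t *pte, first, later;

	pte = pte_offset_map(pmd, addr);	/* enters RCU read-side section */
	first = ptep_get_lockless(pte);		/* ok: page table still mapped */
	pte_unmap(pte);				/* exits RCU read-side section */
	later = ptep_get_lockless(pte);		/* UNSAFE: table may already be freed */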

Move relevant definitions from include/linux/swap.h to mm/swap_state.c,
nothing else used them. Delete the CONFIG_64BIT distinction and buffer,
delete all reference to ptes from swap_ra_info(), use pte_offset_map()
repeatedly in swap_vma_readahead(), breaking from the loop if it fails.

(Will the repeated "map" and "unmap" show up as a slowdown anywhere?
If so, maybe modify __read_swap_cache_async() to do the pte_unmap()
only when it does not find the page already in the swapcache.)
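
(For illustration only, a rough sketch of that possible follow-up: the
helper name and the extra "ptep" argument below are hypothetical, not
part of this patch nor of the existing __read_swap_cache_async().)

	static struct page *read_swap_cache_keep_pte(swp_entry_t entry,
			gfp_t gfp_mask, struct vm_area_struct *vma,
			unsigned long addr, bool *page_allocated, pte_t **ptep)
	{
		struct page *page = find_get_page(swap_address_space(entry),
						  swp_offset(entry));
		if (page) {
			/* Hit: leave *ptep mapped, the caller's loop goes on */
			*page_allocated = false;
			return page;
		}
		/* Miss: unmap before the allocating, possibly sleeping path */
		pte_unmap(*ptep);
		*ptep = NULL;
		return __read_swap_cache_async(entry, gfp_mask, vma, addr,
					       page_allocated);
	}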

Use ptep_get_lockless(), mainly for its READ_ONCE(). Correctly advance
the address passed down to each call of __read_swap_cache_async().
Signed-off-by: Hugh Dickins <hughd@google.com>
---
 include/linux/swap.h | 19 -------------------
 mm/swap_state.c      | 45 +++++++++++++++++++++++---------------------
 2 files changed, 24 insertions(+), 40 deletions(-)
Comments
Hi, Hugh,
Sorry for late reply.
Hugh Dickins <hughd@google.com> writes:
> swap_vma_readahead() has been proceeding in an unconventional way, its
> preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(),
> then relying on that pte pointer even after the pte_unmap() - in its
> CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit
> copied ptes to stack while they were mapped, but had to limit how many).
>
> Though it would be difficult to construct a failing testcase, accessing
> page table after pte_unmap() will become bad practice, even on 64-bit:
> an rcu_read_unlock() in pte_unmap() will allow page table to be freed.
>
> Move relevant definitions from include/linux/swap.h to mm/swap_state.c,
> nothing else used them. Delete the CONFIG_64BIT distinction and buffer,
> delete all reference to ptes from swap_ra_info(), use pte_offset_map()
> repeatedly in swap_vma_readahead(), breaking from the loop if it fails.
>
> (Will the repeated "map" and "unmap" show up as a slowdown anywhere?
> If so, maybe modify __read_swap_cache_async() to do the pte_unmap()
> only when it does not find the page already in the swapcache.)
>
> Use ptep_get_lockless(), mainly for its READ_ONCE(). Correctly advance
> the address passed down to each call of __read_swap_cache_async().
>
> Signed-off-by: Hugh Dickins <hughd@google.com>
> ---
> include/linux/swap.h | 19 -------------------
> mm/swap_state.c | 45 +++++++++++++++++++++++---------------------
> 2 files changed, 24 insertions(+), 40 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 3c69cb653cb9..1b9f2d92fc10 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -337,25 +337,6 @@ struct swap_info_struct {
> */
> };
>
> -#ifdef CONFIG_64BIT
> -#define SWAP_RA_ORDER_CEILING 5
> -#else
> -/* Avoid stack overflow, because we need to save part of page table */
> -#define SWAP_RA_ORDER_CEILING 3
> -#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
> -#endif
> -
> -struct vma_swap_readahead {
> - unsigned short win;
> - unsigned short offset;
> - unsigned short nr_pte;
> -#ifdef CONFIG_64BIT
> - pte_t *ptes;
> -#else
> - pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
> -#endif
> -};
> -
> static inline swp_entry_t folio_swap_entry(struct folio *folio)
> {
> swp_entry_t entry = { .val = page_private(&folio->page) };
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index b76a65ac28b3..a43b41975da2 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -698,6 +698,14 @@ void exit_swap_address_space(unsigned int type)
> swapper_spaces[type] = NULL;
> }
>
> +#define SWAP_RA_ORDER_CEILING 5
> +
> +struct vma_swap_readahead {
> + unsigned short win;
> + unsigned short offset;
> + unsigned short nr_pte;
> +};
> +
Because we don't deal with PTEs in struct vma_swap_readahead anymore, it
appears simpler to record addresses directly, for example,
struct vma_swap_readahead {
	unsigned long start;
	unsigned long end;
};
We can make ra_info.win the return value of swap_ra_info().
Anyway, this can be a separate cleanup patch based on this patch.
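
(A minimal, hypothetical sketch of that cleanup; the changed
swap_ra_info() return value and the start/end arithmetic below are only
part of this suggestion, not code from the patch.)

	/* swap_ra_info() fills ra_info->start/end and returns the window: */
	ra_info->start = faddr - ((fpfn - start) << PAGE_SHIFT);
	ra_info->end = ra_info->start + ((end - start) << PAGE_SHIFT);
	return win;

	/* ...and swap_vma_readahead() then loops over addresses directly: */
	win = swap_ra_info(vmf, &ra_info);
	if (win == 1)
		goto skip;
	for (addr = ra_info.start; addr < ra_info.end; addr += PAGE_SIZE) {
		/* map/check/readahead/unmap exactly as in the patch */
	}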
For the patch itself, feel free to add,
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
> static void swap_ra_info(struct vm_fault *vmf,
> struct vma_swap_readahead *ra_info)
> {
> @@ -705,11 +713,7 @@ static void swap_ra_info(struct vm_fault *vmf,
> unsigned long ra_val;
> unsigned long faddr, pfn, fpfn, lpfn, rpfn;
> unsigned long start, end;
> - pte_t *pte, *orig_pte;
> unsigned int max_win, hits, prev_win, win;
> -#ifndef CONFIG_64BIT
> - pte_t *tpte;
> -#endif
>
> max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
> SWAP_RA_ORDER_CEILING);
> @@ -728,12 +732,9 @@ static void swap_ra_info(struct vm_fault *vmf,
> max_win, prev_win);
> atomic_long_set(&vma->swap_readahead_info,
> SWAP_RA_VAL(faddr, win, 0));
> -
> if (win == 1)
> return;
>
> - /* Copy the PTEs because the page table may be unmapped */
> - orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
> if (fpfn == pfn + 1) {
> lpfn = fpfn;
> rpfn = fpfn + win;
> @@ -753,15 +754,6 @@ static void swap_ra_info(struct vm_fault *vmf,
>
> ra_info->nr_pte = end - start;
> ra_info->offset = fpfn - start;
> - pte -= ra_info->offset;
> -#ifdef CONFIG_64BIT
> - ra_info->ptes = pte;
> -#else
> - tpte = ra_info->ptes;
> - for (pfn = start; pfn != end; pfn++)
> - *tpte++ = *pte++;
> -#endif
> - pte_unmap(orig_pte);
> }
>
> /**
> @@ -785,7 +777,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
> struct swap_iocb *splug = NULL;
> struct vm_area_struct *vma = vmf->vma;
> struct page *page;
> - pte_t *pte, pentry;
> + pte_t *pte = NULL, pentry;
> + unsigned long addr;
> swp_entry_t entry;
> unsigned int i;
> bool page_allocated;
> @@ -797,17 +790,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
> if (ra_info.win == 1)
> goto skip;
>
> + addr = vmf->address - (ra_info.offset * PAGE_SIZE);
> +
> blk_start_plug(&plug);
> - for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
> - i++, pte++) {
> - pentry = *pte;
> + for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
> + if (!pte++) {
> + pte = pte_offset_map(vmf->pmd, addr);
> + if (!pte)
> + break;
> + }
> + pentry = ptep_get_lockless(pte);
> if (!is_swap_pte(pentry))
> continue;
> entry = pte_to_swp_entry(pentry);
> if (unlikely(non_swap_entry(entry)))
> continue;
> + pte_unmap(pte);
> + pte = NULL;
> page = __read_swap_cache_async(entry, gfp_mask, vma,
> - vmf->address, &page_allocated);
> + addr, &page_allocated);
> if (!page)
> continue;
> if (page_allocated) {
> @@ -819,6 +820,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
> }
> put_page(page);
> }
> + if (pte)
> + pte_unmap(pte);
> blk_finish_plug(&plug);
> swap_read_unplug(splug);
> lru_add_drain();
On Mon, 12 Jun 2023, Huang, Ying wrote:
> Hi, Hugh,
>
> Sorry for late reply.
Never apologize to *me* for being "late" or "slow" or "unresponsive".
Thanks for looking, yes, it was indeed for this one that I particularly
added you to the Cc.
>
> Hugh Dickins <hughd@google.com> writes:
>
> > swap_vma_readahead() has been proceeding in an unconventional way, its
> > preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(),
> > then relying on that pte pointer even after the pte_unmap() - in its
> > CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit
> > copied ptes to stack while they were mapped, but had to limit how many).
> >
> > Though it would be difficult to construct a failing testcase, accessing
> > page table after pte_unmap() will become bad practice, even on 64-bit:
> > an rcu_read_unlock() in pte_unmap() will allow page table to be freed.
> >
> > Move relevant definitions from include/linux/swap.h to mm/swap_state.c,
> > nothing else used them. Delete the CONFIG_64BIT distinction and buffer,
> > delete all reference to ptes from swap_ra_info(), use pte_offset_map()
> > repeatedly in swap_vma_readahead(), breaking from the loop if it fails.
> >
> > (Will the repeated "map" and "unmap" show up as a slowdown anywhere?
> > If so, maybe modify __read_swap_cache_async() to do the pte_unmap()
> > only when it does not find the page already in the swapcache.)
> >
> > Use ptep_get_lockless(), mainly for its READ_ONCE(). Correctly advance
> > the address passed down to each call of __read_swap_cache_async().
> >
> > Signed-off-by: Hugh Dickins <hughd@google.com>
> > ---
> > include/linux/swap.h | 19 -------------------
> > mm/swap_state.c | 45 +++++++++++++++++++++++---------------------
> > 2 files changed, 24 insertions(+), 40 deletions(-)
...
> Because we don't deal with PTEs in struct vma_swap_readahead anymore, it
> appears simpler to record addresses directly, for example,
>
> struct vma_swap_readahead {
> 	unsigned long start;
> 	unsigned long end;
> };
>
> We can make ra_info.win the return value of swap_ra_info().
>
> Anyway, this can be a separate cleanup patch based on this patch.
Ooh, that would have required me to think, rather than just delete
lines. Mmm, if you see a cleaner way forward, yes, please do add
some cleanup on top.
>
> For the patch itself, feel free to add,
>
> Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Great, thanks a lot.
Hugh

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3c69cb653cb9..1b9f2d92fc10 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -337,25 +337,6 @@ struct swap_info_struct {
*/
};
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING 5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING 3
-#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
-#endif
-
-struct vma_swap_readahead {
- unsigned short win;
- unsigned short offset;
- unsigned short nr_pte;
-#ifdef CONFIG_64BIT
- pte_t *ptes;
-#else
- pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
-
static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
swp_entry_t entry = { .val = page_private(&folio->page) };
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b76a65ac28b3..a43b41975da2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -698,6 +698,14 @@ void exit_swap_address_space(unsigned int type)
swapper_spaces[type] = NULL;
}
+#define SWAP_RA_ORDER_CEILING 5
+
+struct vma_swap_readahead {
+ unsigned short win;
+ unsigned short offset;
+ unsigned short nr_pte;
+};
+
static void swap_ra_info(struct vm_fault *vmf,
struct vma_swap_readahead *ra_info)
{
@@ -705,11 +713,7 @@ static void swap_ra_info(struct vm_fault *vmf,
unsigned long ra_val;
unsigned long faddr, pfn, fpfn, lpfn, rpfn;
unsigned long start, end;
- pte_t *pte, *orig_pte;
unsigned int max_win, hits, prev_win, win;
-#ifndef CONFIG_64BIT
- pte_t *tpte;
-#endif
max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
SWAP_RA_ORDER_CEILING);
@@ -728,12 +732,9 @@ static void swap_ra_info(struct vm_fault *vmf,
max_win, prev_win);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(faddr, win, 0));
-
if (win == 1)
return;
- /* Copy the PTEs because the page table may be unmapped */
- orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
if (fpfn == pfn + 1) {
lpfn = fpfn;
rpfn = fpfn + win;
@@ -753,15 +754,6 @@ static void swap_ra_info(struct vm_fault *vmf,
ra_info->nr_pte = end - start;
ra_info->offset = fpfn - start;
- pte -= ra_info->offset;
-#ifdef CONFIG_64BIT
- ra_info->ptes = pte;
-#else
- tpte = ra_info->ptes;
- for (pfn = start; pfn != end; pfn++)
- *tpte++ = *pte++;
-#endif
- pte_unmap(orig_pte);
}
/**
@@ -785,7 +777,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct swap_iocb *splug = NULL;
struct vm_area_struct *vma = vmf->vma;
struct page *page;
- pte_t *pte, pentry;
+ pte_t *pte = NULL, pentry;
+ unsigned long addr;
swp_entry_t entry;
unsigned int i;
bool page_allocated;
@@ -797,17 +790,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
if (ra_info.win == 1)
goto skip;
+ addr = vmf->address - (ra_info.offset * PAGE_SIZE);
+
blk_start_plug(&plug);
- for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
- i++, pte++) {
- pentry = *pte;
+ for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
+ if (!pte++) {
+ pte = pte_offset_map(vmf->pmd, addr);
+ if (!pte)
+ break;
+ }
+ pentry = ptep_get_lockless(pte);
if (!is_swap_pte(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
continue;
+ pte_unmap(pte);
+ pte = NULL;
page = __read_swap_cache_async(entry, gfp_mask, vma,
- vmf->address, &page_allocated);
+ addr, &page_allocated);
if (!page)
continue;
if (page_allocated) {
@@ -819,6 +820,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
}
put_page(page);
}
+ if (pte)
+ pte_unmap(pte);
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain();
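
(For readability, the new readahead loop restated with explanatory
comments; the comments are editorial, the code is as in the patch.)

	pte = NULL;
	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
		if (!pte++) {
			/*
			 * pte is NULL on the first pass, and again after the
			 * readahead call below has unmapped it: (re)map the
			 * page table at the current address, and stop if the
			 * table has meanwhile been freed.  If pte was still
			 * mapped, the post-increment simply advances to the
			 * next entry.
			 */
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);	/* READ_ONCE of the entry */
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		/* Unmap before __read_swap_cache_async(), which may block */
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       addr, &page_allocated);
		/* swapin, readahead-hit accounting and put_page() as above */
	}
	if (pte)
		pte_unmap(pte);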