[06/24] swap: rework swapin_no_readahead arguments

Message ID 20231119194740.94101-7-ryncsn@gmail.com
State New
Series Swapin path refactor for optimization and bugfix

Commit Message

Kairui Song Nov. 19, 2023, 7:47 p.m. UTC
  From: Kairui Song <kasong@tencent.com>

Make swapin_no_readahead() use alloc_pages_mpol() instead of
vma_alloc_folio(), and accept an mm_struct directly as an argument
instead of a vmf. This makes its arguments similar to those of
swap_{cluster,vma}_readahead(), keeping the code consistent.

Also prepare for following commits, which will skip the vmf for
certain swapin paths.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/swap_state.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
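
For context, here is a minimal sketch of the new calling convention. It is
illustrative only: the wrapper name swapin_direct_example() is made up, and
entry, gfp_mask and vmf stand in for values already available in the fault
path. The caller resolves the mempolicy and interleave index once with
get_vma_policy() and passes only mpol, ilx and the mm_struct down, mirroring
the swapin_readahead() hunk below.

static struct page *swapin_direct_example(swp_entry_t entry, gfp_t gfp_mask,
					  struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	struct page *page;
	pgoff_t ilx;

	/* Resolve the NUMA policy and interleave index from the faulting VMA once. */
	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);

	/*
	 * The helper now only needs mpol/ilx for the page allocation and the
	 * mm_struct for the memcg swap-in charge, not the whole vmf.
	 */
	page = swapin_no_readahead(entry, gfp_mask, mpol, ilx, vmf->vma->vm_mm);

	mpol_cond_put(mpol);
	return page;
}

This is also what makes it possible to drop the vmf in later patches: once the
policy lookup lives in the caller, nothing in the helper depends on the
faulting address.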
  

Comments

kernel test robot Nov. 20, 2023, 12:20 a.m. UTC | #1
Hi Kairui,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on linus/master v6.7-rc2 next-20231117]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swap-fix-a-potential-undefined-behavior-issue/20231120-035926
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20231119194740.94101-7-ryncsn%40gmail.com
patch subject: [PATCH 06/24] swap: rework swapin_no_readahead arguments
config: i386-buildonly-randconfig-003-20231120 (https://download.01.org/0day-ci/archive/20231120/202311200826.8Nl5w3h8-lkp@intel.com/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231120/202311200826.8Nl5w3h8-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202311200826.8Nl5w3h8-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/swap_state.c:872: warning: Function parameter or member 'mpol' not described in 'swapin_no_readahead'
>> mm/swap_state.c:872: warning: Function parameter or member 'ilx' not described in 'swapin_no_readahead'
>> mm/swap_state.c:872: warning: Function parameter or member 'mm' not described in 'swapin_no_readahead'
>> mm/swap_state.c:872: warning: Excess function parameter 'vmf' description in 'swapin_no_readahead'


vim +872 mm/swap_state.c

d9bfcfdc41e8e5 Huang Ying  2017-09-06  859  
19f582d2684e47 Kairui Song 2023-11-20  860  /**
19f582d2684e47 Kairui Song 2023-11-20  861   * swapin_no_readahead - swap in pages skipping swap cache and readahead
19f582d2684e47 Kairui Song 2023-11-20  862   * @entry: swap entry of this memory
19f582d2684e47 Kairui Song 2023-11-20  863   * @gfp_mask: memory allocation flags
19f582d2684e47 Kairui Song 2023-11-20  864   * @vmf: fault information
19f582d2684e47 Kairui Song 2023-11-20  865   *
19f582d2684e47 Kairui Song 2023-11-20  866   * Returns the struct page for entry and addr after the swap entry is read
19f582d2684e47 Kairui Song 2023-11-20  867   * in.
19f582d2684e47 Kairui Song 2023-11-20  868   */
598f2616cde014 Kairui Song 2023-11-20  869  static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
2538a5e96fe62f Kairui Song 2023-11-20  870  					struct mempolicy *mpol, pgoff_t ilx,
2538a5e96fe62f Kairui Song 2023-11-20  871  					struct mm_struct *mm)
19f582d2684e47 Kairui Song 2023-11-20 @872  {
19f582d2684e47 Kairui Song 2023-11-20  873  	struct folio *folio;
2538a5e96fe62f Kairui Song 2023-11-20  874  	struct page *page;
19f582d2684e47 Kairui Song 2023-11-20  875  	void *shadow = NULL;
19f582d2684e47 Kairui Song 2023-11-20  876  
2538a5e96fe62f Kairui Song 2023-11-20  877  	page = alloc_pages_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
2538a5e96fe62f Kairui Song 2023-11-20  878  	folio = (struct folio *)page;
19f582d2684e47 Kairui Song 2023-11-20  879  	if (folio) {
2538a5e96fe62f Kairui Song 2023-11-20  880  		if (mem_cgroup_swapin_charge_folio(folio, mm,
c2ac0dcbf9ab6a Kairui Song 2023-11-20  881  						   GFP_KERNEL, entry)) {
19f582d2684e47 Kairui Song 2023-11-20  882  			folio_put(folio);
19f582d2684e47 Kairui Song 2023-11-20  883  			return NULL;
19f582d2684e47 Kairui Song 2023-11-20  884  		}
c2ac0dcbf9ab6a Kairui Song 2023-11-20  885  
c2ac0dcbf9ab6a Kairui Song 2023-11-20  886  		__folio_set_locked(folio);
c2ac0dcbf9ab6a Kairui Song 2023-11-20  887  		__folio_set_swapbacked(folio);
c2ac0dcbf9ab6a Kairui Song 2023-11-20  888  
19f582d2684e47 Kairui Song 2023-11-20  889  		mem_cgroup_swapin_uncharge_swap(entry);
19f582d2684e47 Kairui Song 2023-11-20  890  
19f582d2684e47 Kairui Song 2023-11-20  891  		shadow = get_shadow_from_swap_cache(entry);
19f582d2684e47 Kairui Song 2023-11-20  892  		if (shadow)
19f582d2684e47 Kairui Song 2023-11-20  893  			workingset_refault(folio, shadow);
19f582d2684e47 Kairui Song 2023-11-20  894  
19f582d2684e47 Kairui Song 2023-11-20  895  		folio_add_lru(folio);
19f582d2684e47 Kairui Song 2023-11-20  896  
19f582d2684e47 Kairui Song 2023-11-20  897  		/* To provide entry to swap_readpage() */
19f582d2684e47 Kairui Song 2023-11-20  898  		folio->swap = entry;
19f582d2684e47 Kairui Song 2023-11-20  899  		swap_readpage(page, true, NULL);
19f582d2684e47 Kairui Song 2023-11-20  900  		folio->private = NULL;
19f582d2684e47 Kairui Song 2023-11-20  901  	}
19f582d2684e47 Kairui Song 2023-11-20  902  
19f582d2684e47 Kairui Song 2023-11-20  903  	return page;
19f582d2684e47 Kairui Song 2023-11-20  904  }
19f582d2684e47 Kairui Song 2023-11-20  905
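
The warnings come from the kerneldoc block above, which still describes @vmf
while the reworked function takes @mpol, @ilx and @mm. A minimal sketch of an
updated comment is shown below; the parameter descriptions are guesses modeled
on the neighbouring swap_{cluster,vma}_readahead() kerneldoc, not taken from a
posted patch.

/**
 * swapin_no_readahead - swap in pages skipping swap cache and readahead
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @mm: the mm_struct to charge the newly allocated folio against
 *
 * Returns the struct page for entry and addr after the swap entry is read
 * in.
 */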
  
Chris Li Nov. 21, 2023, 6:44 a.m. UTC | #2
On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Make swapin_no_readahead() use alloc_pages_mpol() instead of
> vma_alloc_folio(), and accept an mm_struct directly as an argument
> instead of a vmf. This makes its arguments similar to those of
> swap_{cluster,vma}_readahead(), keeping the code consistent.

It is unclear to me what the value is of making the arguments more
aligned with swap_{cluster,vma}_readahead here.

> Also prepare for following commits, which will skip the vmf for
> certain swapin paths.

The following patch 07/24 does not seem to use this change.
Can it be merged with the patch that does use it?

As it is, this patch does not provide standalone value to justify it.

Chris

>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
>  mm/swap_state.c | 15 +++++++--------
>  1 file changed, 7 insertions(+), 8 deletions(-)
>
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index fd0047ae324e..ff6756f2e8e4 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -867,17 +867,17 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
>   * in.
>   */
>  static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
> -                                       struct vm_fault *vmf)
> +                                       struct mempolicy *mpol, pgoff_t ilx,
> +                                       struct mm_struct *mm)
>  {
> -       struct vm_area_struct *vma = vmf->vma;
> -       struct page *page = NULL;
>         struct folio *folio;
> +       struct page *page;
>         void *shadow = NULL;
>
> -       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
> -                               vma, vmf->address, false);
> +       page = alloc_pages_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
> +       folio = (struct folio *)page;
>         if (folio) {
> -               if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
> +               if (mem_cgroup_swapin_charge_folio(folio, mm,
>                                                    GFP_KERNEL, entry)) {
>                         folio_put(folio);
>                         return NULL;
> @@ -896,7 +896,6 @@ static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
>
>                 /* To provide entry to swap_readpage() */
>                 folio->swap = entry;
> -               page = &folio->page;
>                 swap_readpage(page, true, NULL);
>                 folio->private = NULL;
>         }
> @@ -928,7 +927,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
>
>         mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
>         if (swap_use_no_readahead(swp_swap_info(entry), entry)) {
> -               page = swapin_no_readahead(entry, gfp_mask, vmf);
> +               page = swapin_no_readahead(entry, gfp_mask, mpol, ilx, vmf->vma->vm_mm);
>                 cached = false;
>         } else if (swap_use_vma_readahead()) {
>                 page = swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf);
> --
> 2.42.0
>
>
  
Kairui Song Nov. 23, 2023, 10:51 a.m. UTC | #3
Chris Li <chrisl@kernel.org> wrote on Tue, Nov 21, 2023 at 14:44:
>
> On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
> >
> > From: Kairui Song <kasong@tencent.com>
> >
> > Make swapin_no_readahead() use alloc_pages_mpol() instead of
> > vma_alloc_folio(), and accept an mm_struct directly as an argument
> > instead of a vmf. This makes its arguments similar to those of
> > swap_{cluster,vma}_readahead(), keeping the code consistent.
>
> It is unclear to me what the value is of making the arguments more
> aligned with swap_{cluster,vma}_readahead here.
>
> > Also prepare for following commits, which will skip the vmf for
> > certain swapin paths.
>
> The following patch 07/24 does not seem to use this change.
> Can it be merged with the patch that does use it?
>
> As it is, this patch does not provide standalone value to justify it.

Yes, I'll rearrange this.
  

Patch

diff --git a/mm/swap_state.c b/mm/swap_state.c
index fd0047ae324e..ff6756f2e8e4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -867,17 +867,17 @@  static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
  * in.
  */
 static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
-					struct vm_fault *vmf)
+					struct mempolicy *mpol, pgoff_t ilx,
+					struct mm_struct *mm)
 {
-	struct vm_area_struct *vma = vmf->vma;
-	struct page *page = NULL;
 	struct folio *folio;
+	struct page *page;
 	void *shadow = NULL;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
-				vma, vmf->address, false);
+	page = alloc_pages_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
+	folio = (struct folio *)page;
 	if (folio) {
-		if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
+		if (mem_cgroup_swapin_charge_folio(folio, mm,
 						   GFP_KERNEL, entry)) {
 			folio_put(folio);
 			return NULL;
@@ -896,7 +896,6 @@  static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
 
 		/* To provide entry to swap_readpage() */
 		folio->swap = entry;
-		page = &folio->page;
 		swap_readpage(page, true, NULL);
 		folio->private = NULL;
 	}
@@ -928,7 +927,7 @@  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 
 	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
 	if (swap_use_no_readahead(swp_swap_info(entry), entry)) {
-		page = swapin_no_readahead(entry, gfp_mask, vmf);
+		page = swapin_no_readahead(entry, gfp_mask, mpol, ilx, vmf->vma->vm_mm);
 		cached = false;
 	} else if (swap_use_vma_readahead()) {
 		page = swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf);