[PATCH -next 2/7] mm: memory: convert do_anonymous_page() to use a folio
Commit Message
Convert do_anonymous_page() to use a folio and replace the related
page functions with their folio equivalents.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
mm/memory.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
Comments
On Thu, Jan 12, 2023 at 04:30:01PM +0800, Kefeng Wang wrote:
> Convert do_anonymous_page() to use a folio and replace the related
> page functions with their folio equivalents.
I think this patch has a prerequisite of sorting out
alloc_zeroed_user_highpage_movable(). That way we can get rid of
the 'page' variable inside this function altogether.
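As the hunks below show, 'page' has to stay live even after this
conversion, because the allocator hands back a struct page and
mk_pte() consumes one; condensed from the diff:

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		goto oom;
	folio = page_folio(page);
	/* ... */
	entry = mk_pte(page, vma->vm_page_prot);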
> inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> page_add_new_anon_rmap(page, vma, vmf->address);
folio_add_new_anon_rmap().
> - lru_cache_add_inactive_or_unevictable(page, vma);
> + folio_add_lru_vma(folio, vma);
On 2023/1/13 23:33, Matthew Wilcox wrote:
> On Thu, Jan 12, 2023 at 04:30:01PM +0800, Kefeng Wang wrote:
>> Convert do_anonymous_page() to use a folio and replace the related
>> page functions with their folio equivalents.
>
> I think this patch has a prerequisite of sorting out
> alloc_zeroed_user_highpage_movable(). That way we can get rid of
> the 'page' variable inside this function altogether.
How about providing a wrapper like
folio_alloc_zeroed_user_highmem_movable()? The name is a little bit
long, though.
static inline struct folio *
folio_alloc_zeroed_user_highmem_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	struct folio *folio = NULL;
	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vma, vaddr);
	if (page)
		folio = page_folio(page);

	return folio;
}
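If something like that went in, the call site in do_anonymous_page()
could drop the struct page entirely; a rough sketch under that
assumption (mk_pte() would then take &folio->page, and the existing
oom_free_page label is kept as-is):

	folio = folio_alloc_zeroed_user_highmem_movable(vma, vmf->address);
	if (!folio)
		goto oom;

	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
		goto oom_free_page;
	folio_throttle_swaprate(folio, GFP_KERNEL);

	__folio_mark_uptodate(folio);
	entry = mk_pte(&folio->page, vma->vm_page_prot);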
>
>> inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
>> page_add_new_anon_rmap(page, vma, vmf->address);
>
> folio_add_new_anon_rmap().
ok, will update.
>
>> - lru_cache_add_inactive_or_unevictable(page, vma);
>> + folio_add_lru_vma(folio, vma);
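Folding that in, the resulting hunk would presumably be:

	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);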
@@ -4002,6 +4002,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
+	struct folio *folio;
 	vm_fault_t ret = 0;
 	pte_t entry;
 
@@ -4055,16 +4056,17 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (!page)
 		goto oom;
 
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	folio = page_folio(page);
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
-	cgroup_throttle_swaprate(page, GFP_KERNEL);
+	folio_throttle_swaprate(folio, GFP_KERNEL);
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
 	entry = mk_pte(page, vma->vm_page_prot);
 	entry = pte_sw_mkyoung(entry);
@@ -4085,13 +4087,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		put_page(page);
+		folio_put(folio);
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address);
-	lru_cache_add_inactive_or_unevictable(page, vma);
+	folio_add_lru_vma(folio, vma);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -4101,10 +4103,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
 release:
-	put_page(page);
+	folio_put(folio);
 	goto unlock;
 oom_free_page:
-	put_page(page);
+	folio_put(folio);
 oom:
 	return VM_FAULT_OOM;
 }
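For reference on the barrier comment above: __folio_mark_uptodate()
issues smp_wmb() before setting PG_uptodate, which is what orders the
stores zeroing the folio ahead of the later set_pte_at(). A sketch of
its body, approximately as in include/linux/pagemap.h around this
series (treat as an approximation, not a verbatim quote):

	static __always_inline void __folio_mark_uptodate(struct folio *folio)
	{
		/* Order the stores to the folio contents before the flag. */
		smp_wmb();
		__set_bit(PG_uptodate, folio_flags(folio, 0));
	}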