@@ -1391,6 +1391,94 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}
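+/*
+ * page_cont_mapped_vaddr - given that @anchor is mapped at @anchor_vaddr,
+ * return the virtual address at which @page would be mapped if the folio is
+ * contiguously mapped. Returns 0 if the address would underflow the address
+ * space and ULONG_MAX if it would overflow, so callers can clamp the result.
+ */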
+static inline unsigned long page_cont_mapped_vaddr(struct page *page,
+ struct page *anchor, unsigned long anchor_vaddr)
+{
+ unsigned long offset;
+ unsigned long vaddr;
+
+ offset = (page_to_pfn(page) - page_to_pfn(anchor)) << PAGE_SHIFT;
+ vaddr = anchor_vaddr + offset;
+
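+ /*
+ * A negative offset that wraps below 0 clamps to 0; a positive
+ * offset that wraps above ULONG_MAX clamps to ULONG_MAX.
+ */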
+ if (anchor > page) {
+ if (vaddr > anchor_vaddr)
+ return 0;
+ } else {
+ if (vaddr < anchor_vaddr)
+ return ULONG_MAX;
+ }
+
+ return vaddr;
+}
+
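+/*
+ * folio_nr_pages_cont_mapped - count how many pages of @folio, starting at
+ * @page, are mapped by present, pfn-contiguous ptes starting at @pte, without
+ * scanning beyond @end or the end of the folio. Returns at least 1; the
+ * caller guarantees that @page itself is mapped at @addr.
+ */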
+static int folio_nr_pages_cont_mapped(struct folio *folio,
+ struct page *page, pte_t *pte,
+ unsigned long addr, unsigned long end)
+{
+ pte_t ptent;
+ int floops;
+ int i;
+ unsigned long pfn;
+ struct page *folio_end;
+
+ if (!folio_test_large(folio))
+ return 1;
+
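+ /* Cap the scan at whichever comes first: @end or the end of the folio. */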
+ folio_end = &folio->page + folio_nr_pages(folio);
+ end = min(page_cont_mapped_vaddr(folio_end, page, addr), end);
+ floops = (end - addr) >> PAGE_SHIFT;
+ pfn = page_to_pfn(page);
+ pfn++;
+ pte++;
+
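+ /* @page is known to be mapped at @addr; scan forward from the next pte. */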
+ for (i = 1; i < floops; i++) {
+ ptent = ptep_get(pte);
+
+ if (!pte_present(ptent) || pte_pfn(ptent) != pfn)
+ break;
+
+ pfn++;
+ pte++;
+ }
+
+ return i;
+}
+
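+/*
+ * try_zap_anon_pte_range - zap up to @nr_pages present ptes mapping
+ * consecutive pages of @folio, starting at @pte/@addr, removing the rmap for
+ * the whole batch in a single call. May stop early if the mmu_gather batch
+ * fills up; returns the number of pages actually zapped.
+ */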
+static unsigned long try_zap_anon_pte_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ struct folio *folio,
+ struct page *page, pte_t *pte,
+ unsigned long addr, int nr_pages,
+ struct zap_details *details)
+{
+ struct mm_struct *mm = tlb->mm;
+ pte_t ptent;
+ bool full;
+ int i;
+
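+ /*
+ * Clear each pte and queue its page in the mmu_gather, deferring the
+ * rmap removal until the whole batch is done. __tlb_remove_page()
+ * returns true once the gather batch is full and must be flushed.
+ */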
+ for (i = 0; i < nr_pages;) {
+ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
+ full = __tlb_remove_page(tlb, page, 0);
+
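+ /* Rmap removal is deferred, so the mapcount must still be >= 1 here. */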
+ if (unlikely(page_mapcount(page) < 1))
+ print_bad_pte(vma, addr, ptent, page);
+
+ i++;
+ page++;
+ pte++;
+ addr += PAGE_SIZE;
+
+ if (unlikely(full))
+ break;
+ }
+
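+ /* Remove rmap for all i zapped pages in a single batched call. */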
+ folio_remove_rmap_range(folio, page - i, i, vma);
+
+ return i;
+}
+
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
@@ -1428,6 +1516,38 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
page = vm_normal_page(vma, addr, ptent);
if (unlikely(!should_zap_page(details, page)))
continue;
+
+ /*
+ * Batch zap large anonymous folio mappings. This allows
+ * batching the rmap removal, which avoids spuriously adding a
+ * partially unmapped folio to the deferred split queue in the
+ * common case and so reduces split queue lock contention.
+ */
+ if (page && PageAnon(page)) {
+ struct folio *folio = page_folio(page);
+ int nr_pages_req, nr_pages;
+
+ nr_pages_req = folio_nr_pages_cont_mapped(
+ folio, page, pte, addr, end);
+
+ nr_pages = try_zap_anon_pte_range(tlb, vma,
+ folio, page, pte, addr,
+ nr_pages_req, details);
+
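+ /*
+ * Account all zapped pages, then advance the loop cursors by
+ * nr_pages - 1; the for loop's own increment supplies the final
+ * pte++/PAGE_SIZE step.
+ */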
+ rss[mm_counter(page)] -= nr_pages;
+ nr_pages--;
+ pte += nr_pages;
+ addr += nr_pages << PAGE_SHIFT;
+
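+ /*
+ * The gather batch filled up before all requested ptes were
+ * zapped: force a TLB flush and bail out, bumping addr past the
+ * last zapped pte since break skips the loop increment.
+ */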
+ if (unlikely(nr_pages < nr_pages_req)) {
+ force_flush = 1;
+ addr += PAGE_SIZE;
+ break;
+ }
+ continue;
+ }
+
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);