[2/3] hugetlbfs: close race between MADV_DONTNEED and page fault
Commit Message
From: Rik van Riel <riel@surriel.com>
Malloc libraries, like jemalloc and tcmalloc, make decisions on when
to call madvise independently of the code in the main application.
This sometimes results in the application page faulting on an address,
right after the malloc library has shot down the backing memory with
MADV_DONTNEED.
Usually this is harmless, because we always have some 4kB pages
sitting around to satisfy a page fault. However, with hugetlbfs
systems often allocate only the exact number of huge pages that
the application wants.
Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
any lock taken on the page fault path, which can open up the following
race condition:
       CPU 1                            CPU 2

 MADV_DONTNEED
 unmap page
 shoot down TLB entry
                                  page fault
                                  fail to allocate a huge page
                                  killed with SIGBUS
 free page
Fix that race by pulling the locking from __unmap_hugepage_range_final
into helper functions called from zap_page_range_single. This ensures
page faults stay locked out of the MADV_DONTNEED VMA until the
huge pages have actually been freed.
Signed-off-by: Rik van Riel <riel@surriel.com>
Cc: stable@kernel.org
Fixes: 04ada095dcfc ("hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing")
---
 include/linux/hugetlb.h | 35 +++++++++++++++++++++++++++++++++--
 mm/hugetlb.c            | 28 ++++++++++++++++------------
 mm/memory.c             | 13 ++++++++-----
 3 files changed, 57 insertions(+), 19 deletions(-)
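To make the window concrete, here is a minimal userspace sketch of the
scenario (illustration only, not part of the original posting). It assumes
a pool of exactly one 2MB huge page (vm.nr_hugepages=1); one thread stands
in for the malloc library zapping the mapping, while the main thread
stands in for the application faulting it back in. On an unpatched kernel
the faulting thread can lose the race and be killed with SIGBUS.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumes 2MB huge pages */

static char *map;

/* Stand-in for the malloc library shooting down the backing memory. */
static void *zapper(void *arg)
{
	for (;;)
		madvise(map, HPAGE_SIZE, MADV_DONTNEED);
	return NULL;
}

int main(void)
{
	pthread_t t;

	map = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (pthread_create(&t, NULL, zapper, NULL)) {
		fprintf(stderr, "pthread_create failed\n");
		return 1;
	}

	/* Stand-in for the application touching the zapped range. */
	for (;;)
		memset(map, 0, HPAGE_SIZE);
}

Build with cc -pthread; if the fault path races against the huge page
still being freed, the memset side can hit SIGBUS on kernels without
this fix.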
Comments
On 10/03/23 23:25, riel@surriel.com wrote:
> From: Rik van Riel <riel@surriel.com>
>
> Malloc libraries, like jemalloc and tcmalloc, make decisions on when
> to call madvise independently of the code in the main application.
>
> This sometimes results in the application page faulting on an address,
> right after the malloc library has shot down the backing memory with
> MADV_DONTNEED.
>
> Usually this is harmless, because we always have some 4kB pages
> sitting around to satisfy a page fault. However, with hugetlbfs
> systems often allocate only the exact number of huge pages that
> the application wants.
>
> Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
> any lock taken on the page fault path, which can open up the following
> race condition:
>
>        CPU 1                            CPU 2
>
>  MADV_DONTNEED
>  unmap page
>  shoot down TLB entry
>                                   page fault
>                                   fail to allocate a huge page
>                                   killed with SIGBUS
>  free page
>
> Fix that race by pulling the locking from __unmap_hugepage_range_final
> into helper functions called from zap_page_range_single. This ensures
> page faults stay locked out of the MADV_DONTNEED VMA until the
> huge pages have actually been freed.
>
> Signed-off-by: Rik van Riel <riel@surriel.com>
> Cc: stable@kernel.org
> Fixes: 04ada095dcfc ("hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing")
> ---
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ee7497f37098..424bb8da9519 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5435,16 +5435,19 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
>  		tlb_flush_mmu_tlbonly(tlb);
>  }
>
> -void __unmap_hugepage_range_final(struct mmu_gather *tlb,
> -			  struct vm_area_struct *vma, unsigned long start,
> -			  unsigned long end, struct page *ref_page,
> -			  zap_flags_t zap_flags)
> +void __hugetlb_zap_begin(struct vm_area_struct *vma,
> +			 unsigned long *start, unsigned long *end)
>  {
> +	adjust_range_if_pmd_sharing_possible(vma, start, end);
>  	hugetlb_vma_lock_write(vma);
> -	i_mmap_lock_write(vma->vm_file->f_mapping);
> +	if (vma->vm_file)
> +		i_mmap_lock_write(vma->vm_file->f_mapping);
> +}
>
> -	/* mmu notification performed in caller */
> -	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
> +void __hugetlb_zap_end(struct vm_area_struct *vma,
> +		       struct zap_details *details)
> +{
> +	zap_flags_t zap_flags = details ? details->zap_flags : 0;
>
>  	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
>  		/*
> @@ -5457,11 +5460,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
>  		 * someone else.
>  		 */
>  		__hugetlb_vma_unlock_write_free(vma);
> -		i_mmap_unlock_write(vma->vm_file->f_mapping);
>  	} else {
> -		i_mmap_unlock_write(vma->vm_file->f_mapping);
>  		hugetlb_vma_unlock_write(vma);
>  	}
> +
> +	if (vma->vm_file)
> +		i_mmap_unlock_write(vma->vm_file->f_mapping);
>  }
In the case of a mmap(hugetlbfs_file_mmap) error, the per-vma hugetlb
lock will not be set up. The hugetlb_vma_lock/unlock routines do not
check for this, as they were previously always called after the lock was
set up. So, we can now get:
[ 47.653806] BUG: kernel NULL pointer dereference, address: 00000000000000c8
[ 47.654967] #PF: supervisor read access in kernel mode
[ 47.655900] #PF: error_code(0x0000) - not-present page
[ 47.656814] PGD 8000000307415067 P4D 8000000307415067 PUD 30587b067 PMD 0
[ 47.658005] Oops: 0000 [#1] PREEMPT SMP PTI
[ 47.658777] CPU: 3 PID: 1224 Comm: heap-overflow Tainted: G W 6.6.0-rc3-next-20230925+ #19
[ 47.660428] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc37 04/01/2014
[ 47.661931] RIP: 0010:__lock_acquire+0x1e6/0x2390
[ 47.662784] Code: 46 24 4c 89 e8 25 ff 1f 00 00 48 0f a3 05 f2 84 0f 02 0f 83 e9 05 00 00 48 8d 14 40 48 8d 04 90 48 c1 e0 04 48 05 a0 89 27 83 <0f> b6 98 c8 00 00 00 41 0f b7 46 20 66 25 ff 1f 0f b7 c0 48 0f a3
[ 47.665890] RSP: 0018:ffffc90004a03ac8 EFLAGS: 00010046
[ 47.667009] RAX: 0000000000000000 RBX: 000000000004138c RCX: 0000000000000000
[ 47.668321] RDX: 0000000000000002 RSI: ffffffff8246ce26 RDI: 00000000ffffffff
[ 47.669580] RBP: 0000000000000000 R08: 0000000000009ffb R09: 0000000000000001
[ 47.670825] R10: ffff888303c51ac0 R11: ffff888303c52430 R12: 0000000000000001
[ 47.672070] R13: 3b97e6ab9880538c R14: ffff888303c52458 R15: 0000000000000000
[ 47.673285] FS: 00007f3065e7a0c0(0000) GS:ffff888477d00000(0000) knlGS:0000000000000000
[ 47.675504] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 47.676646] CR2: 00000000000000c8 CR3: 000000030409a004 CR4: 0000000000370ef0
[ 47.677975] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 47.679264] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 47.680603] Call Trace:
[ 47.681196] <TASK>
[ 47.681723] ? __die+0x1f/0x70
[ 47.682440] ? page_fault_oops+0x159/0x450
[ 47.683246] ? do_user_addr_fault+0x65/0x850
[ 47.684082] ? exc_page_fault+0x6d/0x1c0
[ 47.684838] ? asm_exc_page_fault+0x22/0x30
[ 47.685611] ? __lock_acquire+0x1e6/0x2390
[ 47.686360] ? __lock_acquire+0xab9/0x2390
[ 47.687123] lock_acquire+0xd4/0x2c0
[ 47.687811] ? __hugetlb_zap_begin+0x6e/0xa0
[ 47.688595] ? mark_held_locks+0x49/0x80
[ 47.689321] down_write+0x2a/0xc0
[ 47.689976] ? __hugetlb_zap_begin+0x6e/0xa0
[ 47.690862] __hugetlb_zap_begin+0x6e/0xa0
[ 47.691707] unmap_vmas+0xb3/0x100
[ 47.692480] unmap_region.constprop.0+0xcc/0x140
[ 47.693518] ? lock_release+0x142/0x290
[ 47.694304] ? preempt_count_add+0x47/0xa0
[ 47.695109] mmap_region+0x565/0xab0
[ 47.695831] do_mmap+0x35a/0x520
[ 47.696511] vm_mmap_pgoff+0xdf/0x200
[ 47.697419] ksys_mmap_pgoff+0xdb/0x200
[ 47.698368] do_syscall_64+0x37/0x90
[ 47.699148] entry_SYSCALL_64_after_hwframe+0x6e/0xd8
[ 47.700307] RIP: 0033:0x7f3065f77086
In my environment, I added the following to this patch to resolve the
issue.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d25db18c9526..48370f5b70f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5503,10 +5503,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 void __hugetlb_zap_begin(struct vm_area_struct *vma,
 			 unsigned long *start, unsigned long *end)
 {
+	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
+		return;
+
 	adjust_range_if_pmd_sharing_possible(vma, start, end);
 	hugetlb_vma_lock_write(vma);
-	if (vma->vm_file)
-		i_mmap_lock_write(vma->vm_file->f_mapping);
+	i_mmap_lock_write(vma->vm_file->f_mapping);
 }
 
 void __hugetlb_zap_end(struct vm_area_struct *vma,
@@ -5514,6 +5516,9 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
 {
 	zap_flags_t zap_flags = details ? details->zap_flags : 0;
 
+	if (!vma->vm_file)	/* hugetlbfs_file_mmap mmap error */
+		return;
+
 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
 		/*
 		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
@@ -5529,8 +5534,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
 		hugetlb_vma_unlock_write(vma);
 	}
 
-	if (vma->vm_file)
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
+	i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
Another way to resolve this would be to fix up the hugetlb_vma_lock/unlock
routines to check for and handle a null lock.
On Wed, 2023-10-04 at 20:19 -0700, Mike Kravetz wrote:
> On 10/03/23 23:25, riel@surriel.com wrote:
> >
> > @@ -5457,11 +5460,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
> >  		 * someone else.
> >  		 */
> >  		__hugetlb_vma_unlock_write_free(vma);
> > -		i_mmap_unlock_write(vma->vm_file->f_mapping);
> >  	} else {
> > -		i_mmap_unlock_write(vma->vm_file->f_mapping);
> >  		hugetlb_vma_unlock_write(vma);
> >  	}
> > +
> > +	if (vma->vm_file)
> > +		i_mmap_unlock_write(vma->vm_file->f_mapping);
> >  }
>
> In the case of a mmap(hugetlbfs_file_mmap) error, the per-vma hugetlb
> lock will not be set up. The hugetlb_vma_lock/unlock routines do not
> check for this, as they were previously always called after the lock was
> set up. So, we can now get:
Wait, the hugetlb_vma_(un)lock_{read,write} functions do
have checks for the presence of the lock:
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}
Both __vma_shareable_lock and __vma_private_lock check that
vma->vm_private_data points at something.
Exactly what corner case am I missing here?
What leaves vma->vm_private_data pointing at something
invalid?
>
> +++ b/mm/hugetlb.c
> @@ -5503,10 +5503,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>  void __hugetlb_zap_begin(struct vm_area_struct *vma,
>  			 unsigned long *start, unsigned long *end)
>  {
> +	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
> +		return;
> +
This does not seem quite correct, because the locking is needed to
avoid the race between MADV_DONTNEED and the page fault path.
> Another way to resolve this would be to fix up the hugetlb_vma_lock/unlock
> routines to check for and handle a null lock.
I thought I had that already.
Does __vma_shareable_lock need to check for !vma->vm_file?
On 10/05/23 09:23, Rik van Riel wrote:
> On Wed, 2023-10-04 at 20:19 -0700, Mike Kravetz wrote:
> > On 10/03/23 23:25, riel@surriel.com wrote:
> > >
> > > @@ -5457,11 +5460,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
> > >  		 * someone else.
> > >  		 */
> > >  		__hugetlb_vma_unlock_write_free(vma);
> > > -		i_mmap_unlock_write(vma->vm_file->f_mapping);
> > >  	} else {
> > > -		i_mmap_unlock_write(vma->vm_file->f_mapping);
> > >  		hugetlb_vma_unlock_write(vma);
> > >  	}
> > > +
> > > +	if (vma->vm_file)
> > > +		i_mmap_unlock_write(vma->vm_file->f_mapping);
> > >  }
> >
> > In the case of a mmap(hugetlbfs_file_mmap) error, the per-vma hugetlb
> > lock will not be set up. The hugetlb_vma_lock/unlock routines do not
> > check for this, as they were previously always called after the lock was
> > set up. So, we can now get:
>
> Wait, the hugetlb_vma_(un)lock_{read,write} functions do
> have checks for the presence of the lock:
>
> void hugetlb_vma_lock_read(struct vm_area_struct *vma)
> {
> 	if (__vma_shareable_lock(vma)) {
> 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
>
> 		down_read(&vma_lock->rw_sema);
> 	} else if (__vma_private_lock(vma)) {
> 		struct resv_map *resv_map = vma_resv_map(vma);
>
> 		down_read(&resv_map->rw_sema);
> 	}
> }
>
> Both __vma_shareable_lock and __vma_private_lock check that
> vma->vm_private_data points at something.
>
> Exactly what corner case am I missing here?
>
> What leaves vma->vm_private_data pointing at something
> invalid?
You are correct. The checks in hugetlb_vma_(un)lock_{read,write} functions
should be sufficient.
Here is the corner case that needs to be addressed:
mmap
    hugetlbfs_file_mmap
        hugetlb_reserve_pages {
            !VM_MAYSHARE, so:
            resv_map = resv_map_alloc();
            set_vma_resv_map(vma, resv_map);
            set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
            ...
            some error in hugetlb_reserve_pages,
            goto out_err

        out_err:
            if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
            return false;
Note that we free resv_map but do not clear vm_private_data. So, at
unmap time we try to use the lock in the freed resv_map.
Leaving the dangling pointer to a freed structure is BAD. However, no
code accessed the freed structure until this patch.
I would suggest incorporating this into this patch, or even as a
standalone patch before this series.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d25db18c9526..a3ae13d0f8fe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1171,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
-	set_vma_private_data(vma, (get_vma_private_data(vma) &
-				HPAGE_RESV_MASK) | (unsigned long)map);
+	set_vma_private_data(vma, (unsigned long)map);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -6910,8 +6909,10 @@ bool hugetlb_reserve_pages(struct inode *inode,
 	 */
 	if (chg >= 0 && add < 0)
 		region_abort(resv_map, from, to, regions_needed);
-	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 		kref_put(&resv_map->refs, resv_map_release);
+		set_vma_resv_map(vma, NULL);
+	}
 
 	return false;
 }
At one time set_vma_resv_map must have wanted to preserve flags.
However, that is no longer the case, so we can remove that code.
Of course, feel free to address this some other way if you would like.
> > +++ b/mm/hugetlb.c
> > @@ -5503,10 +5503,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
> >  void __hugetlb_zap_begin(struct vm_area_struct *vma,
> >  			 unsigned long *start, unsigned long *end)
> >  {
> > +	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
> > +		return;
> > +
>
> This does not seem quite correct, because the locking is needed to
> avoid the race between MADV_DONTNEED and the page fault path.
>
>
Note that vma->vm_file is always set for hugetlb vmas except in the mmap
error case. The first line of code in hugetlb_fault is:
mapping = vma->vm_file->f_mapping;
So, bailing out early like this should not be an issue.
> > Another way to resolve this would be to fix up the hugetlb_vma_lock/unlock
> > routines to check for and handle a null lock.
>
> I thought I had that already.
>
You did. My bad! I saw the NULL deref and jumped to conclusions.
> Does __vma_shareable_lock need to check for !vma->vm_file?
I do not think there is a need. It has the check for vma->vm_private_data
which should be sufficient. We only got in trouble in the !VM_MAYSHARE
case because of the stale/invalid pointer.
However, I do still think we could have the early return I suggested
earlier for the !vma->vm_file case. There is no need to take the locks
then, as we will not be calling the hugetlb unmap code.
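Folding that suggestion into the helper from this patch would look
roughly like this (a sketch of the combined result, not the code as
posted; __hugetlb_zap_end would get the same early return):

void __hugetlb_zap_begin(struct vm_area_struct *vma,
			 unsigned long *start, unsigned long *end)
{
	/*
	 * Only the hugetlbfs_file_mmap error path leaves vm_file NULL,
	 * and nothing is mapped there, so skipping the locks does not
	 * reopen the MADV_DONTNEED vs page fault race.
	 */
	if (!vma->vm_file)
		return;

	adjust_range_if_pmd_sharing_possible(vma, start, end);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
}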
Patch
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -139,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *,
 			  zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
 			  struct page *ref_page, zap_flags_t zap_flags);
@@ -246,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+				unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+			      struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+				     unsigned long *start, unsigned long *end)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+				   struct zap_details *details)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_end(vma, details);
+}
+
 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -297,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }
 
+static inline void hugetlb_zap_begin(
+				struct vm_area_struct *vma,
+				unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+				struct vm_area_struct *vma,
+				struct zap_details *details)
+{
+}
+
 static inline struct page *hugetlb_follow_page_mask(
 	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
 	unsigned int *page_mask)
@@ -442,7 +473,7 @@ static inline long hugetlb_change_protection(
 	return 0;
 }
 
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma, unsigned long start,
 			unsigned long end, struct page *ref_page,
 			zap_flags_t zap_flags)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5306,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	return len + old_addr - old_end;
 }
 
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end,
-				   struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end,
+			    struct page *ref_page, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -5435,16 +5435,19 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
-			  struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end, struct page *ref_page,
-			  zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+			 unsigned long *start, unsigned long *end)
 {
+	adjust_range_if_pmd_sharing_possible(vma, start, end);
 	hugetlb_vma_lock_write(vma);
-	i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+}
 
-	/* mmu notification performed in caller */
-	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+		       struct zap_details *details)
+{
+	zap_flags_t zap_flags = details ? details->zap_flags : 0;
 
 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
 		/*
@@ -5457,11 +5460,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 		 * someone else.
 		 */
 		__hugetlb_vma_unlock_write_free(vma);
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
 	} else {
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
 		hugetlb_vma_unlock_write(vma);
 	}
+
+	if (vma->vm_file)
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 			if (vma->vm_file) {
 				zap_flags_t zap_flags = details ?
 					details->zap_flags : 0;
-				__unmap_hugepage_range_final(tlb, vma, start, end,
+				__unmap_hugepage_range(tlb, vma, start, end,
 							     NULL, zap_flags);
 			}
 		} else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
 	do {
-		unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+		unsigned long start = start_addr;
+		unsigned long end = end_addr;
+		hugetlb_zap_begin(vma, &start, &end);
+		unmap_single_vma(tlb, vma, start, end, &details,
 				 mm_wr_locked);
+		hugetlb_zap_end(vma, &details);
 	} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	lru_add_drain();
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
 				address, end);
-	if (is_vm_hugetlb_page(vma))
-		adjust_range_if_pmd_sharing_possible(vma, &range.start,
-						     &range.end);
+	hugetlb_zap_begin(vma, &range.start, &range.end);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	unmap_single_vma(&tlb, vma, address, end, details, false);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
+	hugetlb_zap_end(vma, details);
 }
 
 /**