On Mon, Oct 31, 2022 at 10:39:33PM -0700, Nadav Amit wrote:
> On Oct 30, 2022, at 2:30 PM, Peter Xu <peterx@redhat.com> wrote:
>
> > + /* With vma lock held, safe without RCU */
> > src_pte = huge_pte_offset(src, addr, sz);
>
> Just another option to consider: you can create an inline function
> huge_pte_offset_locked_mm(), which would do the lockdep_assert_held_*().
>
> I personally would prefer it, since it would clarify exactly the lock you
> care about and "make the code document itself".
That's a great suggestion; I'll give it a shot in the next version.
Thanks!
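
For the record, a minimal sketch of what such a helper could look like.
The name, the vma-based signature, and the use of
hugetlb_vma_assert_locked() as the lockdep-style assertion are all
assumptions for illustration, not something the patch below defines:

/*
 * Hypothetical wrapper around huge_pte_offset(): only legal to call
 * with the hugetlb vma lock held, and it says so via an assertion
 * rather than a comment at every call site.
 */
static inline pte_t *huge_pte_offset_locked(struct vm_area_struct *vma,
					    unsigned long addr,
					    unsigned long sz)
{
	hugetlb_vma_assert_locked(vma);	/* assumed assertion helper */
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

With something along these lines, each "With vma lock held, safe
without RCU" comment in the diff below could collapse into the call
itself.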
@@ -4822,6 +4822,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	last_addr_mask = hugetlb_mask_last_page(h);
 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
+		/* With vma lock held, safe without RCU */
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte) {
 			addr |= last_addr_mask;
@@ -5026,6 +5027,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(mapping);
 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
+		/* With vma lock held, safe without RCU */
 		src_pte = huge_pte_offset(mm, old_addr, sz);
 		if (!src_pte) {
 			old_addr |= last_addr_mask;
@@ -5097,6 +5099,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	last_addr_mask = hugetlb_mask_last_page(h);
 	address = start;
 	for (; address < end; address += sz) {
+		/* With vma lock held, safe without RCU */
 		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep) {
 			address |= last_addr_mask;
@@ -5402,6 +5405,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		hugetlb_vma_lock_read(vma);
 		spin_lock(ptl);
+		/* With vma lock held, safe without RCU */
 		ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 		if (likely(ptep &&
			   pte_same(huge_ptep_get(ptep), pte)))
@@ -5440,6 +5444,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * before the page tables are altered
 	 */
 	spin_lock(ptl);
+	/* With vma lock (and even pgtable lock) held, safe without RCU */
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW or unshare */
@@ -6511,6 +6516,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	last_addr_mask = hugetlb_mask_last_page(h);
 	for (; address < end; address += psize) {
 		spinlock_t *ptl;
+		/* With vma lock held, safe without RCU */
 		ptep = huge_pte_offset(mm, address, psize);
 		if (!ptep) {
 			address |= last_addr_mask;
@@ -7060,7 +7066,13 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		saddr = page_table_shareable(svma, vma, addr, idx);
 		if (saddr) {
+			/*
+			 * huge_pmd_share() (or say its solo caller,
+			 * huge_pte_alloc()) always takes the hugetlb vma
+			 * lock, so it's always safe to walk the pgtable of
+			 * the process, even without RCU.
+			 */
 			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
 			if (spte) {
 				get_page(virt_to_page(spte));
@@ -7420,6 +7433,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (address = start; address < end; address += PUD_SIZE) {
+		/* With vma lock held, safe without RCU */
 		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
 			continue;
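
As a usage illustration only, still assuming the hypothetical
huge_pte_offset_locked() sketched earlier, the first hunk's call site
would then read:

		src_pte = huge_pte_offset_locked(src_vma, addr, sz);

and the per-call-site comments become redundant, since the locking rule
is checked at runtime whenever lockdep is enabled.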