@@ -688,29 +688,6 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
remove_inode_hugepages(inode, offset, LLONG_MAX);
}
-static void hugetlbfs_zero_partial_page(struct hstate *h,
- struct address_space *mapping,
- loff_t start,
- loff_t end)
-{
- pgoff_t idx = start >> huge_page_shift(h);
- struct folio *folio;
-
- folio = filemap_lock_folio(mapping, idx);
- if (!folio)
- return;
-
- start = start & ~huge_page_mask(h);
- end = end & ~huge_page_mask(h);
- if (!end)
- end = huge_page_size(h);
-
- folio_zero_segment(folio, (size_t)start, (size_t)end);
-
- folio_unlock(folio);
- folio_put(folio);
-}
-
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
@@ -737,7 +714,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* If range starts before first full page, zero partial page. */
if (offset < hole_start)
- hugetlbfs_zero_partial_page(h, mapping,
+ hugetlb_zero_partial_page(h, mapping,
offset, min(offset + len, hole_start));
/* Unmap users of full pages in the hole. */
@@ -750,7 +727,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* If range extends beyond last full page, zero partial page. */
if ((offset + len) > hole_end && (offset + len) > hole_start)
- hugetlbfs_zero_partial_page(h, mapping,
+ hugetlb_zero_partial_page(h, mapping,
hole_end, offset + len);
i_mmap_unlock_write(mapping);
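
Worth spelling out the offset arithmetic both call sites rely on: huge_page_mask(h) is ~(huge_page_size(h) - 1), so ANDing with its complement reduces a file offset to its byte offset within the containing huge page, and an end offset that lands exactly on a page boundary masks to zero, which the helper rewrites to huge_page_size(h) so the zeroing runs to the end of the page. A standalone userspace sketch of that arithmetic, assuming a 2 MB huge page size:

#include <stdio.h>

int main(void)
{
	/* Assumed hstate geometry: 2 MB huge pages. */
	unsigned long long huge_page_size = 0x200000ULL;
	unsigned long long huge_page_mask = ~(huge_page_size - 1);

	/* Punch a hole over [0x250000, 0x400000): partial head page only. */
	unsigned long long start = 0x250000ULL;	/* inside huge page 1 */
	unsigned long long end = 0x400000ULL;	/* exactly on a boundary */

	start = start & ~huge_page_mask;	/* 0x50000: offset in page */
	end = end & ~huge_page_mask;		/* 0x0: boundary-aligned */
	if (!end)
		end = huge_page_size;		/* zero through end of page */

	printf("zero [0x%llx, 0x%llx) within the folio\n", start, end);
	return 0;
}

This prints "zero [0x50000, 0x200000) within the folio": everything from the hole's start to the end of the huge page containing it.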
@@ -256,6 +256,9 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void hugetlb_zero_partial_page(struct hstate *h, struct address_space *mapping,
+ loff_t start, loff_t end);
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
@@ -464,6 +467,9 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void hugetlb_zero_partial_page(
+ struct hstate *h, struct address_space *mapping, loff_t start, loff_t end) {}
+
#endif /* !CONFIG_HUGETLB_PAGE */
/*
* hugepages at page global directory. If arch support
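
The empty static inline under !CONFIG_HUGETLB_PAGE follows the usual header pattern: call sites compile unconditionally and the compiler drops the call when hugetlb support is configured out. A minimal sketch of a call site that benefits (the wrapper name is hypothetical):

/* Hypothetical caller: builds with or without CONFIG_HUGETLB_PAGE, no
 * #ifdef needed, because hugetlb.h always provides a definition. */
static void zero_partial(struct hstate *h, struct address_space *mapping,
			 loff_t start, loff_t end)
{
	hugetlb_zero_partial_page(h, mapping, start, end);
}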
@@ -7407,6 +7407,34 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
+void hugetlb_zero_partial_page(struct hstate *h,
+ struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ pgoff_t idx = start >> huge_page_shift(h);
+ struct folio *folio;
+
+ folio = filemap_lock_folio(mapping, idx);
+ if (!folio)
+ return;
+
+ start = start & ~huge_page_mask(h);
+ end = end & ~huge_page_mask(h);
+ if (!end)
+ end = huge_page_size(h);
+
+ folio_zero_segment(folio, (size_t)start, (size_t)end);
+
+ folio_unlock(folio);
+ folio_put(folio);
+}
+
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;
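
With the helper now living in mm/hugetlb.c and declared in the header, a hole punch outside fs/hugetlbfs can follow the same head/tail pattern as hugetlbfs_punch_hole(). A sketch of such a caller, assuming a hugetlbfs-backed inode (the function name is hypothetical; hstate_inode(), round_up(), round_down(), and min() are the stock kernel helpers):

/* Hypothetical out-of-fs caller mirroring hugetlbfs_punch_hole(): zero
 * the partial head and tail pages, leaving full-page removal to the
 * caller's own unmap/remove path. */
static void example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t hole_start = round_up(offset, huge_page_size(h));
	loff_t hole_end = round_down(offset + len, huge_page_size(h));

	if (offset < hole_start)	/* partial page before the hole */
		hugetlb_zero_partial_page(h, mapping, offset,
					  min(offset + len, hole_start));

	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlb_zero_partial_page(h, mapping,	/* partial tail */
					  hole_end, offset + len);

	/* Full pages in [hole_start, hole_end) would be unmapped and
	 * removed here, as hugetlbfs_punch_hole() does. */
}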