[14/46] hugetlb: add make_huge_pte_with_shift

Message ID 20230105101844.1893104-15-jthoughton@google.com
State New
Series Based on latest mm-unstable (85b44c25cd1e).

Commit Message

James Houghton Jan. 5, 2023, 10:18 a.m. UTC
  This allows us to make huge PTEs at shifts other than the hstate shift,
which will be necessary for high-granularity mappings.
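For illustration only, a minimal sketch of how a later high-granularity-mapping
caller might use the new helper. The hgm_make_pte() wrapper and the choice of
PAGE_SHIFT are assumptions for this example and are not part of this series;
the only function taken from the patch is make_huge_pte_with_shift().

	/*
	 * Hypothetical caller: construct a leaf entry covering a single
	 * base page inside a hugetlb VMA by passing PAGE_SHIFT rather
	 * than the hstate shift that make_huge_pte() would use.
	 */
	static pte_t hgm_make_pte(struct vm_area_struct *vma,
				  struct page *page, int writable)
	{
		return make_huge_pte_with_shift(vma, page, writable,
						PAGE_SHIFT);
	}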

Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: James Houghton <jthoughton@google.com>
---
 mm/hugetlb.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
  

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aa8e59cbca69..3a75833d7aba 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5028,11 +5028,11 @@  const struct vm_operations_struct hugetlb_vm_ops = {
 	.pagesize = hugetlb_vm_op_pagesize,
 };
 
-static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
-				int writable)
+static pte_t make_huge_pte_with_shift(struct vm_area_struct *vma,
+				      struct page *page, int writable,
+				      int shift)
 {
 	pte_t entry;
-	unsigned int shift = huge_page_shift(hstate_vma(vma));
 
 	if (writable) {
 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_pte(page,
@@ -5046,6 +5046,14 @@  static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
 	return entry;
 }
 
+static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+			   int writable)
+{
+	unsigned int shift = huge_page_shift(hstate_vma(vma));
+
+	return make_huge_pte_with_shift(vma, page, writable, shift);
+}
+
 static void set_huge_ptep_writable(struct vm_area_struct *vma,
 				   unsigned long address, pte_t *ptep)
 {