[RFC,08/11] shmem: add file length arg in shmem_get_folio() path

Message ID: 20231028211518.3424020-9-da.gomez@samsung.com
State: New
Series: shmem: high order folios support in write path

Commit Message

Daniel Gomez Oct. 28, 2023, 9:15 p.m. UTC
  In preparation for large folio support in the write path, add a file
length argument to the shmem_get_folio() path so that the folio order
can be calculated based on the file size. Non-write paths such as read,
page cache read, and vm fault keep using order-0 (PAGE_SIZE).

This enables high order folios in the write and fallocate paths once
the folio order is calculated based on the length. The fallocate path
passes the remaining length of the requested range, and
shmem_write_begin() clamps the length to a single page when the mapping
does not support large folios.
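For context, a follow-up change could map the passed length to an
allocation order along the lines of the sketch below. This is
illustrative only: the helper name shmem_mapping_size_order() and the
clamp against MAX_PAGECACHE_ORDER are assumptions about later patches
in the series, not part of this patch.

  /*
   * Illustrative sketch (not part of this patch): derive a folio
   * allocation order from the write length. Single-page writes keep
   * order-0; larger writes round up to the next power-of-two number
   * of pages, clamped to what the page cache supports.
   */
  static inline unsigned int shmem_mapping_size_order(size_t len)
  {
  	unsigned int order;

  	if (len <= PAGE_SIZE)
  		return 0;

  	/* get_order() rounds len up to the next power-of-two pages */
  	order = get_order(len);

  	return min_t(unsigned int, order, MAX_PAGECACHE_ORDER);
  }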

Signed-off-by: Daniel Gomez <da.gomez@samsung.com>
---
 include/linux/shmem_fs.h |  2 +-
 mm/khugepaged.c          |  3 ++-
 mm/shmem.c               | 33 ++++++++++++++++++---------------
 mm/userfaultfd.c         |  2 +-
 4 files changed, 22 insertions(+), 18 deletions(-)

--
2.39.2
  

Patch

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 2caa6b86106a..7138ea980884 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -137,7 +137,7 @@  enum sgp_type {
 };

 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
-		enum sgp_type sgp);
+		enum sgp_type sgp, size_t len);
 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
 		pgoff_t index, gfp_t gfp);

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 064654717843..fcde8223b507 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1855,7 +1855,8 @@  static int collapse_file(struct mm_struct *mm, unsigned long addr,
 				xas_unlock_irq(&xas);
 				/* swap in or instantiate fallocated page */
 				if (shmem_get_folio(mapping->host, index,
-						&folio, SGP_NOALLOC)) {
+						    &folio, SGP_NOALLOC,
+						    PAGE_SIZE)) {
 					result = SCAN_FAIL;
 					goto xa_unlocked;
 				}
diff --git a/mm/shmem.c b/mm/shmem.c
index 9d68211373c4..d8dc2ceaba18 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -958,7 +958,7 @@  static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
 	 * (although in some cases this is just a waste of time).
 	 */
 	folio = NULL;
-	shmem_get_folio(inode, index, &folio, SGP_READ);
+	shmem_get_folio(inode, index, &folio, SGP_READ, PAGE_SIZE);
 	return folio;
 }

@@ -1644,7 +1644,7 @@  static struct folio *shmem_alloc_folio(gfp_t gfp,

 static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
 		struct inode *inode, pgoff_t index,
-		struct mm_struct *fault_mm)
+		struct mm_struct *fault_mm, size_t len)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1969,7 +1969,7 @@  static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
  */
 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
-		struct vm_fault *vmf, vm_fault_t *fault_type)
+		struct vm_fault *vmf, vm_fault_t *fault_type, size_t len)
 {
 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
 	struct mm_struct *fault_mm;
@@ -2051,7 +2051,7 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 		huge_gfp = vma_thp_gfp_mask(vma);
 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 		folio = shmem_alloc_and_add_folio(huge_gfp,
-				inode, index, fault_mm);
+				inode, index, fault_mm, len);
 		if (!IS_ERR(folio)) {
 			count_vm_event(THP_FILE_ALLOC);
 			goto alloced;
@@ -2060,7 +2060,7 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 			goto repeat;
 	}

-	folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm);
+	folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, len);
 	if (IS_ERR(folio)) {
 		error = PTR_ERR(folio);
 		if (error == -EEXIST)
@@ -2140,10 +2140,10 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 }

 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
-		enum sgp_type sgp)
+		enum sgp_type sgp, size_t len)
 {
 	return shmem_get_folio_gfp(inode, index, foliop, sgp,
-			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
+			mapping_gfp_mask(inode->i_mapping), NULL, NULL, len);
 }

 /*
@@ -2237,7 +2237,7 @@  static vm_fault_t shmem_fault(struct vm_fault *vmf)

 	WARN_ON_ONCE(vmf->page != NULL);
 	err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
-				  gfp, vmf, &ret);
+				  gfp, vmf, &ret, PAGE_SIZE);
 	if (err)
 		return vmf_error(err);
 	if (folio) {
@@ -2716,6 +2716,9 @@  shmem_write_begin(struct file *file, struct address_space *mapping,
 	struct folio *folio;
 	int ret = 0;

+	if (!mapping_large_folio_support(mapping))
+		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
 	/* i_rwsem is held by caller */
 	if (unlikely(info->seals & (F_SEAL_GROW |
 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
@@ -2725,7 +2728,7 @@  shmem_write_begin(struct file *file, struct address_space *mapping,
 			return -EPERM;
 	}

-	ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
+	ret = shmem_get_folio(inode, index, &folio, SGP_WRITE, len);
 	if (ret)
 		return ret;

@@ -2796,7 +2799,7 @@  static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 				break;
 		}

-		error = shmem_get_folio(inode, index, &folio, SGP_READ);
+		error = shmem_get_folio(inode, index, &folio, SGP_READ, PAGE_SIZE);
 		if (error) {
 			if (error == -EINVAL)
 				error = 0;
@@ -2973,7 +2976,7 @@  static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 			break;

 		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
-					SGP_READ);
+					SGP_READ, PAGE_SIZE);
 		if (error) {
 			if (error == -EINVAL)
 				error = 0;
@@ -3160,7 +3163,7 @@  static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			error = -ENOMEM;
 		else
 			error = shmem_get_folio(inode, index, &folio,
-						SGP_FALLOC);
+						SGP_FALLOC, (end - index) << PAGE_SHIFT);
 		if (error) {
 			info->fallocend = undo_fallocend;
 			/* Remove the !uptodate folios we added */
@@ -3511,7 +3514,7 @@  static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
 		inode->i_op = &shmem_short_symlink_operations;
 	} else {
 		inode_nohighmem(inode);
-		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
+		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE, PAGE_SIZE);
 		if (error)
 			goto out_remove_offset;
 		inode->i_mapping->a_ops = &shmem_aops;
@@ -3558,7 +3561,7 @@  static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
 			return ERR_PTR(-ECHILD);
 		}
 	} else {
-		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
+		error = shmem_get_folio(inode, 0, &folio, SGP_READ, PAGE_SIZE);
 		if (error)
 			return ERR_PTR(error);
 		if (!folio)
@@ -4923,7 +4926,7 @@  struct folio *shmem_read_folio_gfp(struct address_space *mapping,

 	BUG_ON(!shmem_mapping(mapping));
 	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
-				    gfp, NULL, NULL);
+				    gfp, NULL, NULL, PAGE_SIZE);
 	if (error)
 		return ERR_PTR(error);

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 96d9eae5c7cc..aab8679b322a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -256,7 +256,7 @@  static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 	struct page *page;
 	int ret;

-	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
+	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC, PAGE_SIZE);
 	/* Our caller expects us to return -EFAULT if we failed to find folio */
 	if (ret == -ENOENT)
 		ret = -EFAULT;