[1/4] ext4: Convert move_extent_per_page() to use folios

Message ID 20221116021011.54164-2-vishal.moola@gmail.com
State New
Series Removing the try_to_release_page() wrapper

Commit Message

Vishal Moola Nov. 16, 2022, 2:10 a.m. UTC
  Converts move_extent_per_page() to use folios. This change removes
5 calls to compound_head() and is in preparation for the removal of
the try_to_release_page() wrapper.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 fs/ext4/move_extent.c | 47 ++++++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 21 deletions(-)
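
For context on the series title: at this point in the tree,
try_to_release_page() is only a thin compatibility wrapper around
filemap_release_folio(). Roughly (as found in mm/folio-compat.c around
this kernel version; quoted from memory, so treat it as a sketch):

/* Legacy page-based entry point; the folio API does the real work. */
bool try_to_release_page(struct page *page, gfp_t gfp)
{
	return filemap_release_folio(page_folio(page), gfp);
}

Converting call sites such as this one to call filemap_release_folio()
directly is what allows the wrapper to be removed at the end of the series.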
  

Comments

Matthew Wilcox Nov. 16, 2022, 8:09 a.m. UTC | #1
On Tue, Nov 15, 2022 at 06:10:08PM -0800, Vishal Moola (Oracle) wrote:
>  {
>  	struct inode *orig_inode = file_inode(o_filp);
>  	struct page *pagep[2] = {NULL, NULL};
> +	struct folio *folio[2] = {NULL, NULL};

I have a feeling that mext_page_double_lock() should also be converted
to use folios.  But this makes me nervous:

        int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;

and I'm not sure what will happen if one or both of the orig_page
and donor_page is large -- possibly different sizes of large.

Obviously ext4 doesn't allow large folios today, but it would be good to
get some reasoning about why this isn't laying a trap for later (or at
least assertions that neither folio is large so that there's an obvious
scream instead of silent data corruption).
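
To make the worry concrete, a sketch only (blocks_per_folio() below is a
hypothetical helper, not an existing kernel function): once the orig and
donor folios can be large, and of different orders, each folio covers its
own number of blocks, so a single blocks_per_page computed from PAGE_SIZE
can no longer describe both:

static unsigned int blocks_per_folio(struct inode *inode, struct folio *folio)
{
	/* folio_size() may exceed PAGE_SIZE once large folios are allowed */
	return folio_size(folio) >> inode->i_blkbits;
}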
  
Vishal Moola Nov. 16, 2022, 7:49 p.m. UTC | #2
On Wed, Nov 16, 2022 at 12:09 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Tue, Nov 15, 2022 at 06:10:08PM -0800, Vishal Moola (Oracle) wrote:
> >  {
> >       struct inode *orig_inode = file_inode(o_filp);
> >       struct page *pagep[2] = {NULL, NULL};
> > +     struct folio *folio[2] = {NULL, NULL};
>
> I have a feeling that mext_page_double_lock() should also be converted
> to use folios.  But this makes me nervous:
>
>         int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
>
> and I'm not sure what will happen if one or both of the orig_page
> and donor_page is large -- possibly different sizes of large.
>
> Obviously ext4 doesn't allow large folios today, but it would be good to
> get some reasoning about why this isn't laying a trap for later (or at
> least assertions that neither folio is large so that there's an obvious
> scream instead of silent data corruption).

I had thought once mext_page_mkuptodate() and block_commit_write()
were converted to folios, large folios wouldn't be a problem. I hadn't
considered that the folios may be of different sizes. I can add
assertions about both folios being large and the same size in v2.
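
One possible shape for those assertions (illustrative only, the actual v2
may differ), placed right after the folios are looked up in
move_extent_per_page():

	/* Scream rather than silently corrupt data if large folios appear. */
	VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
	VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
	VM_BUG_ON_FOLIO(folio_size(folio[0]) != folio_size(folio[1]), folio[1]);

Note that VM_BUG_ON_FOLIO() only fires with CONFIG_DEBUG_VM enabled, so
plain BUG_ON()s would be the choice if an unconditional scream is wanted.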
  

Patch

diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 044e34cd835c..0c680d4a2929 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -253,6 +253,7 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 {
 	struct inode *orig_inode = file_inode(o_filp);
 	struct page *pagep[2] = {NULL, NULL};
+	struct folio *folio[2] = {NULL, NULL};
 	handle_t *handle;
 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
 	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
@@ -313,6 +314,8 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	 * hold page's lock, if it is still the case data copy is not
 	 * necessary, just swap data blocks between orig and donor.
 	 */
+	folio[0] = page_folio(pagep[0]);
+	folio[1] = page_folio(pagep[1]);
 	if (unwritten) {
 		ext4_double_down_write_data_sem(orig_inode, donor_inode);
 		/* If any of extents in range became initialized we have to
@@ -331,10 +334,10 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 			ext4_double_up_write_data_sem(orig_inode, donor_inode);
 			goto data_copy;
 		}
-		if ((page_has_private(pagep[0]) &&
-		     !try_to_release_page(pagep[0], 0)) ||
-		    (page_has_private(pagep[1]) &&
-		     !try_to_release_page(pagep[1], 0))) {
+		if ((folio_has_private(folio[0]) &&
+		     !filemap_release_folio(folio[0], 0)) ||
+		    (folio_has_private(folio[1]) &&
+		     !filemap_release_folio(folio[1], 0))) {
 			*err = -EBUSY;
 			goto drop_data_sem;
 		}
@@ -344,19 +347,21 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 						   block_len_in_page, 1, err);
 	drop_data_sem:
 		ext4_double_up_write_data_sem(orig_inode, donor_inode);
-		goto unlock_pages;
+		goto unlock_folios;
 	}
 data_copy:
-	*err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
+	*err = mext_page_mkuptodate(&folio[0]->page, from, from + replaced_size);
 	if (*err)
-		goto unlock_pages;
+		goto unlock_folios;
 
 	/* At this point all buffers in range are uptodate, old mapping layout
 	 * is no longer required, try to drop it now. */
-	if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
-	    (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
+	if ((folio_has_private(folio[0]) &&
+		!filemap_release_folio(folio[0], 0)) ||
+	    (folio_has_private(folio[1]) &&
+		!filemap_release_folio(folio[1], 0))) {
 		*err = -EBUSY;
-		goto unlock_pages;
+		goto unlock_folios;
 	}
 	ext4_double_down_write_data_sem(orig_inode, donor_inode);
 	replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
@@ -369,13 +374,13 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 			replaced_size =
 				block_len_in_page << orig_inode->i_blkbits;
 		} else
-			goto unlock_pages;
+			goto unlock_folios;
 	}
 	/* Perform all necessary steps similar write_begin()/write_end()
 	 * but keeping in mind that i_size will not change */
-	if (!page_has_buffers(pagep[0]))
-		create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
-	bh = page_buffers(pagep[0]);
+	if (!folio_buffers(folio[0]))
+		create_empty_buffers(&folio[0]->page, 1 << orig_inode->i_blkbits, 0);
+	bh = folio_buffers(folio[0]);
 	for (i = 0; i < data_offset_in_page; i++)
 		bh = bh->b_this_page;
 	for (i = 0; i < block_len_in_page; i++) {
@@ -385,7 +390,7 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		bh = bh->b_this_page;
 	}
 	if (!*err)
-		*err = block_commit_write(pagep[0], from, from + replaced_size);
+		*err = block_commit_write(&folio[0]->page, from, from + replaced_size);
 
 	if (unlikely(*err < 0))
 		goto repair_branches;
@@ -395,11 +400,11 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	*err = ext4_jbd2_inode_add_write(handle, orig_inode,
 			(loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
 
-unlock_pages:
-	unlock_page(pagep[0]);
-	put_page(pagep[0]);
-	unlock_page(pagep[1]);
-	put_page(pagep[1]);
+unlock_folios:
+	folio_unlock(folio[0]);
+	folio_put(folio[0]);
+	folio_unlock(folio[1]);
+	folio_put(folio[1]);
 stop_journal:
 	ext4_journal_stop(handle);
 	if (*err == -ENOSPC &&
@@ -430,7 +435,7 @@  move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		*err = -EIO;
 	}
 	replaced_count = 0;
-	goto unlock_pages;
+	goto unlock_folios;
 }
 
 /**