[04/12] writeback: Simplify the loops in write_cache_pages()

Message ID 20230626173521.459345-5-willy@infradead.org
State New
Series Convert write_cache_pages() to an iterator

Commit Message

Matthew Wilcox June 26, 2023, 5:35 p.m. UTC
  Collapse the two nested loops into one.  This is needed as a step
towards turning this into an iterator.
---
 mm/page-writeback.c | 94 ++++++++++++++++++++++-----------------------
 1 file changed, 47 insertions(+), 47 deletions(-)
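In outline, the change replaces the outer get-a-batch loop and inner walk-the-batch
loop with a single loop that refills the batch whenever a local index catches up
with it.  A minimal, standalone sketch of that pattern (the struct, helper and item
counts below are invented for illustration and are not the kernel's types):

#include <stdio.h>

#define BATCH_SIZE 4
#define NITEMS 10		/* stand-in for the number of dirty folios */

struct batch {
	int nr;			/* valid entries, like wbc->fbatch.nr */
	int items[BATCH_SIZE];	/* stand-in for wbc->fbatch.folios[] */
};

/* Refill the batch from a fake source; leaves nr == 0 once it is drained. */
static void get_batch(struct batch *b, int *next)
{
	b->nr = 0;
	while (b->nr < BATCH_SIZE && *next < NITEMS)
		b->items[b->nr++] = (*next)++;
}

int main(void)
{
	struct batch b = { .nr = 0 };
	int next = 0;		/* cursor into the fake source */
	int i = 0;		/* index into the current batch */

	for (;;) {
		int item;

		if (i == b.nr) {	/* batch exhausted: fetch more */
			get_batch(&b, &next);
			i = 0;
		}
		if (b.nr == 0)		/* source drained: done */
			break;
		item = b.items[i++];
		printf("write back item %d\n", item);
	}
	return 0;
}

The loop ends only when a refill produces an empty batch, which is the role the
wbc->fbatch.nr == 0 check plays in the patch below.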
  

Comments

Christoph Hellwig June 27, 2023, 4:16 a.m. UTC | #1
On Mon, Jun 26, 2023 at 06:35:13PM +0100, Matthew Wilcox (Oracle) wrote:
> Collapse the two nested loops into one.  This is needed as a step
> towards turning this into an iterator.
> ---
>  mm/page-writeback.c | 94 ++++++++++++++++++++++-----------------------
>  1 file changed, 47 insertions(+), 47 deletions(-)
> 
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index 54f2972dab45..68f28eeb15ed 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -2461,6 +2461,7 @@ int write_cache_pages(struct address_space *mapping,
>  		      void *data)
>  {
>  	int error;
> +	int i = 0;
>  
>  	if (wbc->range_cyclic) {
>  		wbc->index = mapping->writeback_index; /* prev offset */
> @@ -2478,65 +2479,64 @@ int write_cache_pages(struct address_space *mapping,
>  	folio_batch_init(&wbc->fbatch);
>  	wbc->err = 0;
>  
> +	for (;;) {
> +		struct folio *folio;
>  
> +		if (i == wbc->fbatch.nr) {
> +			writeback_get_batch(mapping, wbc);
> +			i = 0;
> +		}
>  		if (wbc->fbatch.nr == 0)
>  			break;
> +		folio = wbc->fbatch.folios[i++];

Did you consider moving what is currently the "i" local variable
into struct writeback_control as well?  Then writeback_get_batch
could return the current folio, and we could have a much nicer loop
here by moving all of the above into writeback_get_batch:

	while ((folio = writeback_get_batch(mapping, wbc))) {

(and yes, writeback_get_batch probably needs a better name with that)
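For what that suggestion might look like, here is a rough sketch only: it assumes
a new cursor field in struct writeback_control (called "i" here), keeps the
series' existing writeback_get_batch() for the refill, and the
writeback_get_folio() name is purely illustrative given the naming question above.

static struct folio *writeback_get_folio(struct address_space *mapping,
		struct writeback_control *wbc)
{
	/* Refill the batch once the (hypothetical) wbc->i cursor catches up. */
	if (wbc->i == wbc->fbatch.nr) {
		writeback_get_batch(mapping, wbc);
		wbc->i = 0;
	}
	/* An empty batch after a refill means there is nothing left to write. */
	if (wbc->fbatch.nr == 0)
		return NULL;
	return wbc->fbatch.folios[wbc->i++];
}

write_cache_pages() could then open its loop as
while ((folio = writeback_get_folio(mapping, wbc))) and the index bookkeeping
would disappear from the caller entirely.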
  

Patch

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 54f2972dab45..68f28eeb15ed 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2461,6 +2461,7 @@  int write_cache_pages(struct address_space *mapping,
 		      void *data)
 {
 	int error;
+	int i = 0;
 
 	if (wbc->range_cyclic) {
 		wbc->index = mapping->writeback_index; /* prev offset */
@@ -2478,65 +2479,64 @@  int write_cache_pages(struct address_space *mapping,
 	folio_batch_init(&wbc->fbatch);
 	wbc->err = 0;
 
-	while (wbc->index <= wbc->end) {
-		int i;
-
-		writeback_get_batch(mapping, wbc);
+	for (;;) {
+		struct folio *folio;
 
+		if (i == wbc->fbatch.nr) {
+			writeback_get_batch(mapping, wbc);
+			i = 0;
+		}
 		if (wbc->fbatch.nr == 0)
 			break;
 
-		for (i = 0; i < wbc->fbatch.nr; i++) {
-			struct folio *folio = wbc->fbatch.folios[i];
+		folio = wbc->fbatch.folios[i++];
 
-			wbc->done_index = folio->index;
+		wbc->done_index = folio->index;
 
-			folio_lock(folio);
-			if (!should_writeback_folio(mapping, wbc, folio)) {
-				folio_unlock(folio);
-				continue;
-			}
+		folio_lock(folio);
+		if (!should_writeback_folio(mapping, wbc, folio)) {
+			folio_unlock(folio);
+			continue;
+		}
 
-			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-
-			error = writepage(folio, wbc, data);
-			if (unlikely(error)) {
-				/*
-				 * Handle errors according to the type of
-				 * writeback. There's no need to continue for
-				 * background writeback. Just push done_index
-				 * past this page so media errors won't choke
-				 * writeout for the entire file. For integrity
-				 * writeback, we must process the entire dirty
-				 * set regardless of errors because the fs may
-				 * still have state to clear for each page. In
-				 * that case we continue processing and return
-				 * the first error.
-				 */
-				if (error == AOP_WRITEPAGE_ACTIVATE) {
-					folio_unlock(folio);
-					error = 0;
-				} else if (wbc->sync_mode != WB_SYNC_ALL) {
-					wbc->err = error;
-					wbc->done_index = folio->index +
-							folio_nr_pages(folio);
-					return writeback_finish(mapping,
-							wbc, true);
-				}
-				if (!wbc->err)
-					wbc->err = error;
-			}
+		trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 
+		error = writepage(folio, wbc, data);
+		if (unlikely(error)) {
 			/*
-			 * We stop writing back only if we are not doing
-			 * integrity sync. In case of integrity sync we have to
-			 * keep going until we have written all the pages
-			 * we tagged for writeback prior to entering this loop.
+			 * Handle errors according to the type of
+			 * writeback. There's no need to continue for
+			 * background writeback. Just push done_index
+			 * past this page so media errors won't choke
+			 * writeout for the entire file. For integrity
+			 * writeback, we must process the entire dirty
+			 * set regardless of errors because the fs may
+			 * still have state to clear for each page. In
+			 * that case we continue processing and return
+			 * the first error.
 			 */
-			if (--wbc->nr_to_write <= 0 &&
-			    wbc->sync_mode == WB_SYNC_NONE)
+			if (error == AOP_WRITEPAGE_ACTIVATE) {
+				folio_unlock(folio);
+				error = 0;
+			} else if (wbc->sync_mode != WB_SYNC_ALL) {
+				wbc->err = error;
+				wbc->done_index = folio->index +
+						folio_nr_pages(folio);
 				return writeback_finish(mapping, wbc, true);
+			}
+			if (!wbc->err)
+				wbc->err = error;
 		}
+
+		/*
+		 * We stop writing back only if we are not doing
+		 * integrity sync. In case of integrity sync we have to
+		 * keep going until we have written all the pages
+		 * we tagged for writeback prior to entering this loop.
+		 */
+		if (--wbc->nr_to_write <= 0 &&
+		    wbc->sync_mode == WB_SYNC_NONE)
+			return writeback_finish(mapping, wbc, true);
 	}
 
 	return writeback_finish(mapping, wbc, false);