erofs: remove the member readahead from struct z_erofs_decompress_frontend

Message ID 20230524061152.30155-1-zbestahu@gmail.com
State New
Series erofs: remove the member readahead from struct z_erofs_decompress_frontend

Commit Message

Yue Hu May 24, 2023, 6:11 a.m. UTC
  From: Yue Hu <huyue2@coolpad.com>

The struct member is only used to add REQ_RAHEAD during I/O submission,
so it is cleaner to pass it as a parameter than to keep it in the struct.

Also, rename z_erofs_get_sync_decompress_policy() to
z_erofs_is_sync_decompress() for better clarity and conciseness.

Signed-off-by: Yue Hu <huyue2@coolpad.com>
---
 fs/erofs/zdata.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
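
In short, after this change the readahead flag travels as a plain argument
instead of sitting in the frontend struct. A condensed sketch assembled from
the hunks below (not a compilable excerpt):

	/* read_folio path: not a readahead request */
	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0), false);

	/* readahead path: request the readahead hint on submitted bios */
	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, nr_pages), true);

	/* z_erofs_runqueue() forwards the flag to the submission code ... */
	z_erofs_submit_queue(f, pagepool, io, &force_fg, readahead);

	/* ... where it finally decides whether to tag each bio */
	if (readahead)
		bio->bi_opf |= REQ_RAHEAD;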
  

Comments

Gao Xiang May 24, 2023, 6:17 a.m. UTC | #1
On 2023/5/24 23:11, Yue Hu wrote:
> From: Yue Hu <huyue2@coolpad.com>
> 
> The struct member is only used to add REQ_RAHEAD during I/O submission,
> so it is cleaner to pass it as a parameter than to keep it in the struct.
> 
> Also, rename z_erofs_get_sync_decompress_policy() to
> z_erofs_is_sync_decompress() for better clarity and conciseness.
> 
> Signed-off-by: Yue Hu <huyue2@coolpad.com>
> ---
>   fs/erofs/zdata.c | 19 +++++++++----------
>   1 file changed, 9 insertions(+), 10 deletions(-)
> 
> diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
> index 45f21db2303a..4522a3be2ce9 100644
> --- a/fs/erofs/zdata.c
> +++ b/fs/erofs/zdata.c
> @@ -550,7 +550,6 @@ struct z_erofs_decompress_frontend {
>   	z_erofs_next_pcluster_t owned_head;
>   	enum z_erofs_pclustermode mode;
>   
> -	bool readahead;
>   	/* used for applying cache strategy on the fly */
>   	bool backmost;
>   	erofs_off_t headoffset;
> @@ -1106,7 +1105,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
>   	return err;
>   }
>   
> -static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
> +static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
>   				       unsigned int readahead_pages)
>   {
>   	/* auto: enable for read_folio, disable for readahead */
> @@ -1672,7 +1671,7 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
>   static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
>   				 struct page **pagepool,
>   				 struct z_erofs_decompressqueue *fgq,
> -				 bool *force_fg)
> +				 bool *force_fg, bool readahead)
>   {
>   	struct super_block *sb = f->inode->i_sb;
>   	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
> @@ -1763,7 +1762,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
>   				bio->bi_iter.bi_sector = (sector_t)cur <<
>   					(sb->s_blocksize_bits - 9);
>   				bio->bi_private = q[JQ_SUBMIT];
> -				if (f->readahead)
> +				if (readahead)
>   					bio->bi_opf |= REQ_RAHEAD;
>   				++nr_bios;
>   			}
> @@ -1799,13 +1798,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
>   }
>   
>   static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
> -			     struct page **pagepool, bool force_fg)
> +			     struct page **pagepool, bool force_fg,
> +			     bool readahead)

			     struct page **pagepool, bool force_fg, bool ra)


>   {
>   	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
>   
>   	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
>   		return;
> -	z_erofs_submit_queue(f, pagepool, io, &force_fg);
> +	z_erofs_submit_queue(f, pagepool, io, &force_fg, readahead);

	z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);


Otherwise it seems ok to me,

Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>

Thanks,
Gao Xiang
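
With the suggested ra spelling applied, the two touched prototypes would
presumably end up as below; this is only a sketch of the suggestion, not a
quote from a respun patch:

	static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
					 struct page **pagepool,
					 struct z_erofs_decompressqueue *fgq,
					 bool *force_fg, bool ra);

	static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
				     struct page **pagepool, bool force_fg, bool ra);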
  

Patch

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 45f21db2303a..4522a3be2ce9 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -550,7 +550,6 @@  struct z_erofs_decompress_frontend {
 	z_erofs_next_pcluster_t owned_head;
 	enum z_erofs_pclustermode mode;
 
-	bool readahead;
 	/* used for applying cache strategy on the fly */
 	bool backmost;
 	erofs_off_t headoffset;
@@ -1106,7 +1105,7 @@  static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	return err;
 }
 
-static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
 				       unsigned int readahead_pages)
 {
 	/* auto: enable for read_folio, disable for readahead */
@@ -1672,7 +1671,7 @@  static void z_erofs_decompressqueue_endio(struct bio *bio)
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
 				 struct z_erofs_decompressqueue *fgq,
-				 bool *force_fg)
+				 bool *force_fg, bool readahead)
 {
 	struct super_block *sb = f->inode->i_sb;
 	struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
@@ -1763,7 +1762,7 @@  static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 				bio->bi_iter.bi_sector = (sector_t)cur <<
 					(sb->s_blocksize_bits - 9);
 				bio->bi_private = q[JQ_SUBMIT];
-				if (f->readahead)
+				if (readahead)
 					bio->bi_opf |= REQ_RAHEAD;
 				++nr_bios;
 			}
@@ -1799,13 +1798,14 @@  static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 }
 
 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-			     struct page **pagepool, bool force_fg)
+			     struct page **pagepool, bool force_fg,
+			     bool readahead)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
 	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(f, pagepool, io, &force_fg);
+	z_erofs_submit_queue(f, pagepool, io, &force_fg, readahead);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1903,8 +1903,8 @@  static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	(void)z_erofs_collector_end(&f);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, 0));
+	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
+			 false);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1922,7 +1922,6 @@  static void z_erofs_readahead(struct readahead_control *rac)
 	struct page *pagepool = NULL, *head = NULL, *page;
 	unsigned int nr_pages;
 
-	f.readahead = true;
 	f.headoffset = readahead_pos(rac);
 
 	z_erofs_pcluster_readmore(&f, rac, f.headoffset +
@@ -1953,7 +1952,7 @@  static void z_erofs_readahead(struct readahead_control *rac)
 	(void)z_erofs_collector_end(&f);
 
 	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_get_sync_decompress_policy(sbi, nr_pages));
+			 z_erofs_is_sync_decompress(sbi, nr_pages), true);
 	erofs_put_metabuf(&f.map.buf);
 	erofs_release_pages(&pagepool);
 }
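
For context on the rename: the helper is a boolean predicate whose result is
passed straight into z_erofs_runqueue() as force_fg, i.e. it decides whether
this read should force synchronous (foreground) decompression. From the
surrounding zdata.c code (not part of this diff, so treat the option names as
recalled context rather than a quotation), its decision is roughly:

	static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
					       unsigned int readahead_pages)
	{
		/* auto: enable for read_folio, disable for readahead */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO &&
		    !readahead_pages)
			return true;

		/* forced on: still allow readahead up to a size cap */
		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON &&
		    readahead_pages <= sbi->opt.max_sync_decompress_pages)
			return true;

		return false;
	}

which is why z_erofs_read_folio() passes 0 and z_erofs_readahead() passes
nr_pages above.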