[v3,09/19] ext4: remove unused parameter and unnecessary forward declaration of ext4_mb_new_blocks_simple
Commit Message
Two cleanups for ext4_mb_new_blocks_simple:
Remove unused parameter handle of ext4_mb_new_blocks_simple.
Move ext4_mb_new_blocks_simple definition before ext4_mb_new_blocks to
remove unnecessary forward declaration of ext4_mb_new_blocks_simple.
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
fs/ext4/mballoc.c | 137 +++++++++++++++++++++++-----------------------
1 file changed, 67 insertions(+), 70 deletions(-)
Comments
On Mon, Apr 17, 2023 at 07:06:07PM +0800, Kemeng Shi wrote:
> Two cleanups for ext4_mb_new_blocks_simple:
> Remove unsed parameter handle of ext4_mb_new_blocks_simple.
> Move ext4_mb_new_blocks_simple definition before ext4_mb_new_blocks to
> remove unnecessary forward declaration of ext4_mb_new_blocks_simple.
>
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Feel free to add:
Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
> ---
> fs/ext4/mballoc.c | 137 +++++++++++++++++++++++-----------------------
> 1 file changed, 67 insertions(+), 70 deletions(-)
>
> diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
> index f37e921c11e5..774f969b39d8 100644
> --- a/fs/ext4/mballoc.c
> +++ b/fs/ext4/mballoc.c
> @@ -5729,8 +5729,72 @@ static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
> return ret;
> }
>
> -static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
> - struct ext4_allocation_request *ar, int *errp);
> +/*
> + * Simple allocator for Ext4 fast commit replay path. It searches for blocks
> + * linearly starting at the goal block and also excludes the blocks which
> + * are going to be in use after fast commit replay.
> + */
> +static ext4_fsblk_t
> +ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
> +{
> + struct buffer_head *bitmap_bh;
> + struct super_block *sb = ar->inode->i_sb;
> + struct ext4_sb_info *sbi = EXT4_SB(sb);
> + ext4_group_t group, nr;
> + ext4_grpblk_t blkoff;
> + ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
> + ext4_grpblk_t i = 0;
> + ext4_fsblk_t goal, block;
> + struct ext4_super_block *es = EXT4_SB(sb)->s_es;
> +
> + goal = ar->goal;
> + if (goal < le32_to_cpu(es->s_first_data_block) ||
> + goal >= ext4_blocks_count(es))
> + goal = le32_to_cpu(es->s_first_data_block);
> +
> + ar->len = 0;
> + ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
> + for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
> + bitmap_bh = ext4_read_block_bitmap(sb, group);
> + if (IS_ERR(bitmap_bh)) {
> + *errp = PTR_ERR(bitmap_bh);
> + pr_warn("Failed to read block bitmap\n");
> + return 0;
> + }
> +
> + while (1) {
> + i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
> + blkoff);
> + if (i >= max)
> + break;
> + if (ext4_fc_replay_check_excluded(sb,
> + ext4_group_first_block_no(sb, group) +
> + EXT4_C2B(sbi, i))) {
> + blkoff = i + 1;
> + } else
> + break;
> + }
> + brelse(bitmap_bh);
> + if (i < max)
> + break;
> +
> + if (++group >= ext4_get_groups_count(sb))
> + group = 0;
> +
> + blkoff = 0;
> + }
> +
> + if (i >= max) {
> + *errp = -ENOSPC;
> + return 0;
> + }
> +
> + block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
> + ext4_mb_mark_bb(sb, block, 1, 1);
> + ar->len = 1;
> +
> + return block;
> +}
>
> /*
> * Main entry point into mballoc to allocate blocks
> @@ -5755,7 +5819,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
>
> trace_ext4_request_blocks(ar);
> if (sbi->s_mount_state & EXT4_FC_REPLAY)
> - return ext4_mb_new_blocks_simple(handle, ar, errp);
> + return ext4_mb_new_blocks_simple(ar, errp);
>
> /* Allow to use superuser reservation for quota file */
> if (ext4_is_quota_file(ar->inode))
> @@ -5979,73 +6043,6 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
> spin_unlock(&sbi->s_md_lock);
> }
>
> -/*
> - * Simple allocator for Ext4 fast commit replay path. It searches for blocks
> - * linearly starting at the goal block and also excludes the blocks which
> - * are going to be in use after fast commit replay.
> - */
> -static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
> - struct ext4_allocation_request *ar, int *errp)
> -{
> - struct buffer_head *bitmap_bh;
> - struct super_block *sb = ar->inode->i_sb;
> - struct ext4_sb_info *sbi = EXT4_SB(sb);
> - ext4_group_t group, nr;
> - ext4_grpblk_t blkoff;
> - ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
> - ext4_grpblk_t i = 0;
> - ext4_fsblk_t goal, block;
> - struct ext4_super_block *es = EXT4_SB(sb)->s_es;
> -
> - goal = ar->goal;
> - if (goal < le32_to_cpu(es->s_first_data_block) ||
> - goal >= ext4_blocks_count(es))
> - goal = le32_to_cpu(es->s_first_data_block);
> -
> - ar->len = 0;
> - ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
> - for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
> - bitmap_bh = ext4_read_block_bitmap(sb, group);
> - if (IS_ERR(bitmap_bh)) {
> - *errp = PTR_ERR(bitmap_bh);
> - pr_warn("Failed to read block bitmap\n");
> - return 0;
> - }
> -
> - while (1) {
> - i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
> - blkoff);
> - if (i >= max)
> - break;
> - if (ext4_fc_replay_check_excluded(sb,
> - ext4_group_first_block_no(sb, group) +
> - EXT4_C2B(sbi, i))) {
> - blkoff = i + 1;
> - } else
> - break;
> - }
> - brelse(bitmap_bh);
> - if (i < max)
> - break;
> -
> - if (++group >= ext4_get_groups_count(sb))
> - group = 0;
> -
> - blkoff = 0;
> - }
> -
> - if (i >= max) {
> - *errp = -ENOSPC;
> - return 0;
> - }
> -
> - block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
> - ext4_mb_mark_bb(sb, block, 1, 1);
> - ar->len = 1;
> -
> - return block;
> -}
> -
> static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
> unsigned long count)
> {
> --
> 2.30.0
>
@@ -5729,8 +5729,72 @@ static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
return ret;
}
-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
- struct ext4_allocation_request *ar, int *errp);
+/*
+ * Simple allocator for Ext4 fast commit replay path. It searches for blocks
+ * linearly starting at the goal block and also excludes the blocks which
+ * are going to be in use after fast commit replay.
+ */
+static ext4_fsblk_t
+ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
+{
+ struct buffer_head *bitmap_bh;
+ struct super_block *sb = ar->inode->i_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t group, nr;
+ ext4_grpblk_t blkoff;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
+ ext4_fsblk_t goal, block;
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+ goal = ar->goal;
+ if (goal < le32_to_cpu(es->s_first_data_block) ||
+ goal >= ext4_blocks_count(es))
+ goal = le32_to_cpu(es->s_first_data_block);
+
+ ar->len = 0;
+ ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
+ for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
+ bitmap_bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR(bitmap_bh)) {
+ *errp = PTR_ERR(bitmap_bh);
+ pr_warn("Failed to read block bitmap\n");
+ return 0;
+ }
+
+ while (1) {
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
+ blkoff);
+ if (i >= max)
+ break;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) +
+ EXT4_C2B(sbi, i))) {
+ blkoff = i + 1;
+ } else
+ break;
+ }
+ brelse(bitmap_bh);
+ if (i < max)
+ break;
+
+ if (++group >= ext4_get_groups_count(sb))
+ group = 0;
+
+ blkoff = 0;
+ }
+
+ if (i >= max) {
+ *errp = -ENOSPC;
+ return 0;
+ }
+
+ block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
+ ext4_mb_mark_bb(sb, block, 1, 1);
+ ar->len = 1;
+
+ return block;
+}
/*
* Main entry point into mballoc to allocate blocks
@@ -5755,7 +5819,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
trace_ext4_request_blocks(ar);
if (sbi->s_mount_state & EXT4_FC_REPLAY)
- return ext4_mb_new_blocks_simple(handle, ar, errp);
+ return ext4_mb_new_blocks_simple(ar, errp);
/* Allow to use superuser reservation for quota file */
if (ext4_is_quota_file(ar->inode))
@@ -5979,73 +6043,6 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
spin_unlock(&sbi->s_md_lock);
}
-/*
- * Simple allocator for Ext4 fast commit replay path. It searches for blocks
- * linearly starting at the goal block and also excludes the blocks which
- * are going to be in use after fast commit replay.
- */
-static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
- struct ext4_allocation_request *ar, int *errp)
-{
- struct buffer_head *bitmap_bh;
- struct super_block *sb = ar->inode->i_sb;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- ext4_group_t group, nr;
- ext4_grpblk_t blkoff;
- ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
- ext4_grpblk_t i = 0;
- ext4_fsblk_t goal, block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- goal = ar->goal;
- if (goal < le32_to_cpu(es->s_first_data_block) ||
- goal >= ext4_blocks_count(es))
- goal = le32_to_cpu(es->s_first_data_block);
-
- ar->len = 0;
- ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
- for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
- bitmap_bh = ext4_read_block_bitmap(sb, group);
- if (IS_ERR(bitmap_bh)) {
- *errp = PTR_ERR(bitmap_bh);
- pr_warn("Failed to read block bitmap\n");
- return 0;
- }
-
- while (1) {
- i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
- blkoff);
- if (i >= max)
- break;
- if (ext4_fc_replay_check_excluded(sb,
- ext4_group_first_block_no(sb, group) +
- EXT4_C2B(sbi, i))) {
- blkoff = i + 1;
- } else
- break;
- }
- brelse(bitmap_bh);
- if (i < max)
- break;
-
- if (++group >= ext4_get_groups_count(sb))
- group = 0;
-
- blkoff = 0;
- }
-
- if (i >= max) {
- *errp = -ENOSPC;
- return 0;
- }
-
- block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
- ext4_mb_mark_bb(sb, block, 1, 1);
- ar->len = 1;
-
- return block;
-}
-
static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
unsigned long count)
{