f2fs: compress: do cleanup in f2fs_truncate_partial_cluster()

Message ID 20231130092310.1297336-1-frank.li@vivo.com
State New
Series f2fs: compress: do cleanup in f2fs_truncate_partial_cluster()

Commit Message

Yangtao Li Nov. 30, 2023, 9:23 a.m. UTC
  The function returns early when err <= 0, so the following
  'if (err > 0)' check is always true. Remove the redundant check and
  hoist its local variable declarations to the top of the function.

Signed-off-by: Yangtao Li <frank.li@vivo.com>
---
 fs/f2fs/compress.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)
  

Comments

Chao Yu Dec. 9, 2023, 9:07 a.m. UTC | #1
On 2023/11/30 17:23, Yangtao Li wrote:
> The function returns early when err <= 0, so the following
> 'if (err > 0)' check is always true. Remove the redundant check and
> hoist its local variable declarations to the top of the function.
> 
> Signed-off-by: Yangtao Li <frank.li@vivo.com>
> ---
>   fs/f2fs/compress.c | 30 +++++++++++++-----------------
>   1 file changed, 13 insertions(+), 17 deletions(-)
> 
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index 36e5dab6baae..de55c266509a 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -1170,7 +1170,9 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
>   	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
>   	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
>   							log_cluster_size;
> -	int err;
> +	struct page **rpages = fsdata;

fsdata is NULL here.

Thanks,

> +	int cluster_size = F2FS_I(inode)->i_cluster_size;
> +	int err, i;
>   
>   	err = f2fs_is_compressed_cluster(inode, start_idx);
>   	if (err < 0)
> @@ -1190,25 +1192,19 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
>   	if (err <= 0)
>   		return err;
>   
> -	if (err > 0) {
> -		struct page **rpages = fsdata;
> -		int cluster_size = F2FS_I(inode)->i_cluster_size;
> -		int i;
> -
> -		for (i = cluster_size - 1; i >= 0; i--) {
> -			loff_t start = rpages[i]->index << PAGE_SHIFT;
> +	for (i = cluster_size - 1; i >= 0; i--) {
> +		loff_t start = rpages[i]->index << PAGE_SHIFT;
>   
> -			if (from <= start) {
> -				zero_user_segment(rpages[i], 0, PAGE_SIZE);
> -			} else {
> -				zero_user_segment(rpages[i], from - start,
> -								PAGE_SIZE);
> -				break;
> -			}
> +		if (from <= start) {
> +			zero_user_segment(rpages[i], 0, PAGE_SIZE);
> +		} else {
> +			zero_user_segment(rpages[i], from - start,
> +							PAGE_SIZE);
> +			break;
>   		}
> -
> -		f2fs_compress_write_end(inode, fsdata, start_idx, true);
>   	}
> +
> +	f2fs_compress_write_end(inode, fsdata, start_idx, true);
>   	return 0;
>   }
>
  

Patch

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 36e5dab6baae..de55c266509a 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1170,7 +1170,9 @@  int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
 	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
 	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
 							log_cluster_size;
-	int err;
+	struct page **rpages = fsdata;
+	int cluster_size = F2FS_I(inode)->i_cluster_size;
+	int err, i;
 
 	err = f2fs_is_compressed_cluster(inode, start_idx);
 	if (err < 0)
@@ -1190,25 +1192,19 @@  int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
 	if (err <= 0)
 		return err;
 
-	if (err > 0) {
-		struct page **rpages = fsdata;
-		int cluster_size = F2FS_I(inode)->i_cluster_size;
-		int i;
-
-		for (i = cluster_size - 1; i >= 0; i--) {
-			loff_t start = rpages[i]->index << PAGE_SHIFT;
+	for (i = cluster_size - 1; i >= 0; i--) {
+		loff_t start = rpages[i]->index << PAGE_SHIFT;
 
-			if (from <= start) {
-				zero_user_segment(rpages[i], 0, PAGE_SIZE);
-			} else {
-				zero_user_segment(rpages[i], from - start,
-								PAGE_SIZE);
-				break;
-			}
+		if (from <= start) {
+			zero_user_segment(rpages[i], 0, PAGE_SIZE);
+		} else {
+			zero_user_segment(rpages[i], from - start,
+							PAGE_SIZE);
+			break;
 		}
-
-		f2fs_compress_write_end(inode, fsdata, start_idx, true);
 	}
+
+	f2fs_compress_write_end(inode, fsdata, start_idx, true);
 	return 0;
 }
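
What the review comment implies is that the cleanup would have to defer the
rpages assignment until after fsdata has been populated. A minimal, untested
sketch of that ordering (assuming the cluster pages are still prepared via
f2fs_prepare_compress_overwrite(), as in mainline), not a posted revision:

	struct page **rpages;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int err, i;

	...

	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);
	if (err <= 0)
		return err;

	/* fsdata now points at the cluster's page array */
	rpages = fsdata;

	for (i = cluster_size - 1; i >= 0; i--) {
		loff_t start = rpages[i]->index << PAGE_SHIFT;

		if (from <= start) {
			zero_user_segment(rpages[i], 0, PAGE_SIZE);
		} else {
			zero_user_segment(rpages[i], from - start,
							PAGE_SIZE);
			break;
		}
	}

	f2fs_compress_write_end(inode, fsdata, start_idx, true);
	return 0;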