[V4,2/8] null_blk: code cleanup
Commit Message
Introduce and use two new macros for calculating the page index for a
given sector, and the offset of the sector within that page.
The newly added macros make the code easier to read, with meaningful
names and explanatory comments attached to them.
While at it, adjust the code in null_free_sector() to return early
to get rid of the extra indentation.
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/block/null_blk/main.c | 37 ++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 16 deletions(-)
Comments
On Nov 29, 2022 / 15:28, Chaitanya Kulkarni wrote:
> Introduce and use two new macros for calculating the page index from
> given sector and index (offset) of the sector in the page.
> The newly added macros makes code easy to read with meaningful name and
> explanation comments attached to it.
>
> While at it adjust the code in the null_free_sector() to return early
> to get rid of the extra identation.
>
> Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Looks good to me.
Reviewed-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
@@ -14,6 +14,11 @@
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
+/* Gives the index of the page to which this sector belongs. */
+#define PAGE_IDX_FROM_SECT(sect) ((sect) >> PAGE_SECTORS_SHIFT)
+/* Gives the byte offset of the sector within its page. */
+#define SECT_OFFSET_IN_PAGE(sect) (((sect) & SECTOR_MASK) << SECTOR_SHIFT)
+
#define FREE_BATCH 16
#define TICKS_PER_SEC 50ULL
@@ -860,20 +865,20 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
struct radix_tree_root *root;
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
- idx = sector >> PAGE_SECTORS_SHIFT;
+ idx = PAGE_IDX_FROM_SECT(sector);
sector_bit = (sector & SECTOR_MASK);
t_page = radix_tree_lookup(root, idx);
- if (t_page) {
- __clear_bit(sector_bit, t_page->bitmap);
-
- if (null_page_empty(t_page)) {
- ret = radix_tree_delete_item(root, idx, t_page);
- WARN_ON(ret != t_page);
- null_free_page(ret);
- if (is_cache)
- nullb->dev->curr_cache -= PAGE_SIZE;
- }
+ if (!t_page)
+ return;
+ __clear_bit(sector_bit, t_page->bitmap);
+
+ if (null_page_empty(t_page)) {
+ ret = radix_tree_delete_item(root, idx, t_page);
+ WARN_ON(ret != t_page);
+ null_free_page(ret);
+ if (is_cache)
+ nullb->dev->curr_cache -= PAGE_SIZE;
}
}
@@ -885,11 +890,11 @@ static void null_zero_sector(struct nullb_device *d, sector_t sect,
unsigned int offset;
void *dest;
- t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+ t_page = radix_tree_lookup(root, PAGE_IDX_FROM_SECT(sect));
if (!t_page)
return;
- offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+ offset = SECT_OFFSET_IN_PAGE(sect);
dest = kmap_atomic(t_page->page);
memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
kunmap_atomic(dest);
@@ -949,7 +954,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
struct nullb_page *t_page;
struct radix_tree_root *root;
- idx = sector >> PAGE_SECTORS_SHIFT;
+ idx = PAGE_IDX_FROM_SECT(sector);
sector_bit = (sector & SECTOR_MASK);
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
@@ -1125,7 +1130,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
if (null_cache_active(nullb) && !is_fua)
null_make_cache_space(nullb, PAGE_SIZE);
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ offset = SECT_OFFSET_IN_PAGE(sector);
t_page = null_insert_page(nullb, sector,
!null_cache_active(nullb) || is_fua);
if (!t_page)
@@ -1159,7 +1164,7 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
while (count < n) {
temp = min_t(size_t, nullb->dev->blocksize, n - count);
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ offset = SECT_OFFSET_IN_PAGE(sector);
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));