In null_blk we have simple macros for the various page and sector
calculations to make the code more readable.

In the same spirit, this is a purely code-cleanup patch that introduces
two new macros: one to calculate the page index from a given sector, and
one to calculate the index (offset) of that sector within the page.

Signed-off-by: Chaitanya Kulkarni <[email protected]>
---
 drivers/block/null_blk_main.c | 37 ++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index fca011a05277..d463bde001b6 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -16,6 +16,11 @@
 #define PAGE_SECTORS           (1 << PAGE_SECTORS_SHIFT)
 #define SECTOR_MASK            (PAGE_SECTORS - 1)
 
+/* Gives the index of the page to which this sector belongs. */
+#define PAGE_IDX_FROM_SECT(sect)       (sect >> PAGE_SECTORS_SHIFT)
+/* Gives the index (offset) of the sector within the page. */
+#define SECT_IDX_IN_PAGE(sect)         ((sect & SECTOR_MASK) << SECTOR_SHIFT)
+
 #define FREE_BATCH             16
 
 #define TICKS_PER_SEC          50ULL
@@ -708,20 +713,20 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
        struct radix_tree_root *root;
 
        root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
-       idx = sector >> PAGE_SECTORS_SHIFT;
+       idx = PAGE_IDX_FROM_SECT(sector);
        sector_bit = (sector & SECTOR_MASK);
 
        t_page = radix_tree_lookup(root, idx);
-       if (t_page) {
-               __clear_bit(sector_bit, t_page->bitmap);
-
-               if (null_page_empty(t_page)) {
-                       ret = radix_tree_delete_item(root, idx, t_page);
-                       WARN_ON(ret != t_page);
-                       null_free_page(ret);
-                       if (is_cache)
-                               nullb->dev->curr_cache -= PAGE_SIZE;
-               }
+       if (!t_page)
+               return;
+       __clear_bit(sector_bit, t_page->bitmap);
+
+       if (null_page_empty(t_page)) {
+               ret = radix_tree_delete_item(root, idx, t_page);
+               WARN_ON(ret != t_page);
+               null_free_page(ret);
+               if (is_cache)
+                       nullb->dev->curr_cache -= PAGE_SIZE;
        }
 }
 
@@ -733,11 +738,11 @@ static void null_zero_sector(struct nullb_device *d, sector_t sect,
        unsigned int offset;
        void *dest;
 
-       t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+       t_page = radix_tree_lookup(root, PAGE_IDX_FROM_SECT(sect));
        if (!t_page)
                return;
 
-       offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+       offset = SECT_IDX_IN_PAGE(sect);
        dest = kmap_atomic(t_page->page);
        memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
        kunmap_atomic(dest);
@@ -797,7 +802,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
        struct nullb_page *t_page;
        struct radix_tree_root *root;
 
-       idx = sector >> PAGE_SECTORS_SHIFT;
+       idx = PAGE_IDX_FROM_SECT(sector);
        sector_bit = (sector & SECTOR_MASK);
 
        root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
@@ -973,7 +978,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
                if (null_cache_active(nullb) && !is_fua)
                        null_make_cache_space(nullb, PAGE_SIZE);
 
-               offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+               offset = SECT_IDX_IN_PAGE(sector);
                t_page = null_insert_page(nullb, sector,
                        !null_cache_active(nullb) || is_fua);
                if (!t_page)
@@ -1007,7 +1012,7 @@ static int copy_from_nullb(struct nullb *nullb, struct page *dest,
        while (count < n) {
                temp = min_t(size_t, nullb->dev->blocksize, n - count);
 
-               offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+               offset = SECT_IDX_IN_PAGE(sector);
                t_page = null_lookup_page(nullb, sector, false,
                        !null_cache_active(nullb));
 
-- 
2.17.0

Reply via email to