Add support for memory-backed REQ_OP_WRITE_ZEROES operations in the
null_blk request mode. Since write-zeroes requests carry no payload,
introduce two new functions, null_zero_sector() and
null_handle_write_zeroes(), which zero out the affected sector(s)
with memset().

Signed-off-by: Chaitanya Kulkarni <[email protected]>
---
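Notes:

For reference, below is a minimal user-space sketch (not part of this
patch) that can exercise this path via the BLKZEROOUT ioctl, which the
block layer may translate into REQ_OP_WRITE_ZEROES once null_blk
advertises write-zeroes support. The device node and range are
illustrative; this assumes a memory-backed instance, e.g. created with
"modprobe null_blk memory_backed=1", at /dev/nullb0:

	/* Hypothetical test sketch, not part of this patch. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>

	int main(void)
	{
		/* { start, length } in bytes; zero the first 1 MiB. */
		uint64_t range[2] = { 0, 1 << 20 };
		int fd = open("/dev/nullb0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* BLKZEROOUT is serviced by blkdev_issue_zeroout(). */
		if (ioctl(fd, BLKZEROOUT, range) < 0)
			perror("BLKZEROOUT");
		close(fd);
		return 0;
	}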
 drivers/block/null_blk_main.c | 45 ++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 65da7c2d93b9..fca011a05277 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -725,6 +725,24 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
        }
 }
 
+static void null_zero_sector(struct nullb_device *d, sector_t sect,
+                            sector_t nr_sects, bool cache)
+{
+       struct radix_tree_root *root = cache ? &d->cache : &d->data;
+       struct nullb_page *t_page;
+       unsigned int offset;
+       void *dest;
+
+       t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+       if (!t_page)
+               return;
+
+       offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+       dest = kmap_atomic(t_page->page);
+       memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
+       kunmap_atomic(dest);
+}
+
 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
        struct nullb_page *t_page, bool is_cache)
 {
@@ -1026,6 +1044,25 @@ static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
        spin_unlock_irq(&nullb->lock);
 }
 
+static void null_handle_write_zeroes(struct nullb *nullb, sector_t sector,
+                                    unsigned int bytes_left)
+{
+       sector_t nr_sectors;
+       size_t curr_bytes;
+
+       spin_lock_irq(&nullb->lock);
+       while (bytes_left > 0) {
+               curr_bytes = min_t(size_t, bytes_left, nullb->dev->blocksize);
+               nr_sectors = curr_bytes >> SECTOR_SHIFT;
+               null_zero_sector(nullb->dev, sector, nr_sectors, false);
+               if (null_cache_active(nullb))
+                       null_zero_sector(nullb->dev, sector, nr_sectors, true);
+               sector += nr_sectors;
+               bytes_left -= curr_bytes;
+       }
+       spin_unlock_irq(&nullb->lock);
+}
+
 static int null_handle_flush(struct nullb *nullb)
 {
        int err;
@@ -1075,9 +1112,15 @@ static int null_handle_rq(struct nullb_cmd *cmd)
 
        sector = blk_rq_pos(rq);
 
-       if (req_op(rq) == REQ_OP_DISCARD) {
+       switch (req_op(rq)) {
+       case REQ_OP_DISCARD:
                null_handle_discard(nullb, sector, blk_rq_bytes(rq));
                return 0;
+       case REQ_OP_WRITE_ZEROES:
+               null_handle_write_zeroes(nullb, sector, blk_rq_bytes(rq));
+               return 0;
+       default:
+               break;
        }
 
        spin_lock_irq(&nullb->lock);
-- 
2.17.0
