Read/write/flush are the most common operations, so optimize the switch in
is_abnormal_io() for those cases. This follows the same pattern established in
the block perf-wip commit ("block: optimise blk_may_split for normal rw").

Also, push the is_abnormal_io() check and blk_queue_split() down from
dm_submit_bio() to dm_split_and_process_bio(), and set a new
'is_abnormal_io' flag in clone_info. Optimize __split_and_process_bio()
and __process_abnormal_io() by leveraging the ci.is_abnormal_io flag.

Signed-off-by: Mike Snitzer <[email protected]>
---
 drivers/md/dm.c | 60 +++++++++++++++++++++++++++++----------------------------
 1 file changed, 31 insertions(+), 29 deletions(-)

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3b87d151ef88..b9c30dfe0f2a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -84,7 +84,8 @@ struct clone_info {
        struct dm_io *io;
        sector_t sector;
        unsigned sector_count;
-       bool submit_as_polled;
+       bool is_abnormal_io:1;
+       bool submit_as_polled:1;
 };
 
 #define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
@@ -1492,21 +1493,24 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 
 static bool is_abnormal_io(struct bio *bio)
 {
-       bool r = false;
+       unsigned int op = bio_op(bio);
 
-       switch (bio_op(bio)) {
-       case REQ_OP_DISCARD:
-       case REQ_OP_SECURE_ERASE:
-       case REQ_OP_WRITE_ZEROES:
-               r = true;
-               break;
+       if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
+               switch (op) {
+               case REQ_OP_DISCARD:
+               case REQ_OP_SECURE_ERASE:
+               case REQ_OP_WRITE_ZEROES:
+                       return true;
+               default:
+                       break;
+               }
        }
 
-       return r;
+       return false;
 }
 
-static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
-                                 blk_status_t *status)
+static blk_status_t __process_abnormal_io(struct clone_info *ci,
+                                         struct dm_target *ti)
 {
        unsigned num_bios = 0;
 
@@ -1520,8 +1524,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
        case REQ_OP_WRITE_ZEROES:
                num_bios = ti->num_write_zeroes_bios;
                break;
-       default:
-               return false;
        }
 
        /*
@@ -1531,12 +1533,10 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
         * check was performed.
         */
        if (unlikely(!num_bios))
-               *status = BLK_STS_NOTSUPP;
-       else {
-               __send_changing_extent_only(ci, ti, num_bios);
-               *status = BLK_STS_OK;
-       }
-       return true;
+               return BLK_STS_NOTSUPP;
+
+       __send_changing_extent_only(ci, ti, num_bios);
+       return BLK_STS_OK;
 }
 
 /*
@@ -1589,11 +1589,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
        struct bio *clone;
        struct dm_target *ti;
        unsigned len;
-       blk_status_t error = BLK_STS_IOERR;
 
        ti = dm_table_find_target(ci->map, ci->sector);
-       if (unlikely(!ti || __process_abnormal_io(ci, ti, &error)))
-               return error;
+       if (unlikely(!ti))
+               return BLK_STS_IOERR;
+       else if (unlikely(ci->is_abnormal_io))
+               return __process_abnormal_io(ci, ti);
 
        /*
         * Only support bio polling for normal IO, and the target io is
@@ -1618,6 +1619,7 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
        ci->map = map;
        ci->io = alloc_io(md, bio);
        ci->bio = bio;
+       ci->is_abnormal_io = false;
        ci->submit_as_polled = false;
        ci->sector = bio->bi_iter.bi_sector;
        ci->sector_count = bio_sectors(bio);
@@ -1645,6 +1647,13 @@ static void dm_split_and_process_bio(struct mapped_device *md,
                __send_empty_flush(&ci);
                /* dm_io_complete submits any data associated with flush */
                goto out;
+       } else if (unlikely(is_abnormal_io(bio))) {
+               /*
+                * Use blk_queue_split() for abnormal IO (e.g. discard, etc)
+                * otherwise associated queue_limits won't be imposed.
+                */
+               blk_queue_split(&bio);
+               ci.is_abnormal_io = true;
        }
 
        error = __split_and_process_bio(&ci);
@@ -1698,13 +1707,6 @@ static void dm_submit_bio(struct bio *bio)
                goto out;
        }
 
-       /*
-        * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
-        * otherwise associated queue_limits won't be imposed.
-        */
-       if (unlikely(is_abnormal_io(bio)))
-               blk_queue_split(&bio);
-
        dm_split_and_process_bio(md, map, bio);
 out:
        dm_put_live_table_bio(md, srcu_idx, bio);
-- 
2.15.0

--
dm-devel mailing list
[email protected]
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to