Also change dm_io_complete() to use bio_clear_polled() so that it
clears both REQ_POLLED and BIO_PERCPU_CACHE if the bio is requeued due
to BLK_STS_DM_REQUEUE.

Only io_uring benefits from using BIOSET_PERCPU_CACHE: the per-cpu cache is
only safe to use in non-interrupt context, but io_uring's completions all
occur in process context.

This change improves DM's hipri bio polling performance by ~7%.

Signed-off-by: Mike Snitzer <[email protected]>
---
 drivers/md/dm.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a2e80c376827..06f3720a190b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,9 +899,9 @@ static void dm_io_complete(struct dm_io *io)
                /*
                 * Upper layer won't help us poll split bio, io->orig_bio
                 * may only reflect a subset of the pre-split original,
-                * so clear REQ_POLLED in case of requeue
+                * so clear REQ_POLLED and BIO_PERCPU_CACHE on requeue.
                 */
-               bio->bi_opf &= ~REQ_POLLED;
+               bio_clear_polled(bio);
                return;
        }
 
@@ -3014,7 +3014,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
                pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
                front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
                io_front_pad = roundup(per_io_data_size,  __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
-               ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
+               ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, BIOSET_PERCPU_CACHE);
                if (ret)
                        goto out;
                if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
-- 
2.15.0

--
dm-devel mailing list
[email protected]
https://listman.redhat.com/mailman/listinfo/dm-devel

Reply via email to