This patch factors the throttling logic out of null_handle_cmd() into a
new helper, null_handle_throttled().

Signed-off-by: Chaitanya Kulkarni <[email protected]>
---
 drivers/block/null_blk_main.c | 43 +++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 99328ded60d1..98e2985f57fc 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1133,28 +1133,41 @@ static void null_restart_queue_async(struct nullb *nullb)
                blk_mq_start_stopped_hw_queues(q, true);
 }
 
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
 {
        struct nullb_device *dev = cmd->nq->dev;
        struct nullb *nullb = dev->nullb;
-       int err = 0;
+       blk_status_t sts = BLK_STS_OK;
+       struct request *rq = cmd->rq;
 
-       if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
-               struct request *rq = cmd->rq;
+       if (!test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags))
+               goto out;
 
-               if (!hrtimer_active(&nullb->bw_timer))
-                       hrtimer_restart(&nullb->bw_timer);
+       if (!hrtimer_active(&nullb->bw_timer))
+               hrtimer_restart(&nullb->bw_timer);
 
-               if (atomic_long_sub_return(blk_rq_bytes(rq),
-                               &nullb->cur_bytes) < 0) {
-                       null_stop_queue(nullb);
-                       /* race with timer */
-                       if (atomic_long_read(&nullb->cur_bytes) > 0)
-                               null_restart_queue_async(nullb);
-                       /* requeue request */
-                       return BLK_STS_DEV_RESOURCE;
-               }
+       if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
+               null_stop_queue(nullb);
+               /* race with timer */
+               if (atomic_long_read(&nullb->cur_bytes) > 0)
+                       null_restart_queue_async(nullb);
+               /* requeue request */
+               sts = BLK_STS_DEV_RESOURCE;
        }
+out:
+       return sts;
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+{
+       struct nullb_device *dev = cmd->nq->dev;
+       struct nullb *nullb = dev->nullb;
+       blk_status_t sts;
+       int err = 0;
+
+       sts = null_handle_throttled(cmd);
+       if (sts != BLK_STS_OK)
+               return sts;
 
        if (nullb->dev->badblocks.shift != -1) {
                int bad_sectors;
-- 
2.21.0

Reply via email to