Now no one uses these APIs anymore, so unexport them.

Signed-off-by: Ming Lei <[email protected]>
---
 block/blk-mq.c         | 80 --------------------------------------------------
 block/blk-mq.h         |  2 ++
 include/linux/blk-mq.h |  8 -----
 3 files changed, 2 insertions(+), 88 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d994449c154b..e8a3486dc8a2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1208,12 +1208,6 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                         msecs_to_jiffies(msecs));
 }
 
-void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
-{
-       __blk_mq_delay_run_hw_queue(hctx, true, msecs);
-}
-EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
-
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
        __blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -1255,60 +1249,6 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-/*
- * This function is often used for pausing .queue_rq() by driver when
- * there isn't enough resource or some conditions aren't satisfied, and
- * BLK_MQ_RQ_QUEUE_BUSY is usually returned.
- *
- * We do not guarantee that dispatch can be drained or blocked
- * after blk_mq_stop_hw_queue() returns. Please use
- * blk_mq_quiesce_queue() for that requirement.
- */
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
-{
-       cancel_delayed_work(&hctx->run_work);
-
-       set_bit(BLK_MQ_S_STOPPED, &hctx->state);
-}
-EXPORT_SYMBOL(blk_mq_stop_hw_queue);
-
-/*
- * This function is often used for pausing .queue_rq() by driver when
- * there isn't enough resource or some conditions aren't satisfied, and
- * BLK_MQ_RQ_QUEUE_BUSY is usually returned.
- *
- * We do not guarantee that dispatch can be drained or blocked
- * after blk_mq_stop_hw_queues() returns. Please use
- * blk_mq_quiesce_queue() for that requirement.
- */
-void blk_mq_stop_hw_queues(struct request_queue *q)
-{
-       struct blk_mq_hw_ctx *hctx;
-       int i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_stop_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_stop_hw_queues);
-
-void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
-{
-       clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-
-       blk_mq_run_hw_queue(hctx, false);
-}
-EXPORT_SYMBOL(blk_mq_start_hw_queue);
-
-void blk_mq_start_hw_queues(struct request_queue *q)
-{
-       struct blk_mq_hw_ctx *hctx;
-       int i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_start_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_start_hw_queues);
-
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
        if (!blk_mq_hctx_stopped(hctx))
@@ -1317,7 +1257,6 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
        blk_mq_run_hw_queue(hctx, async);
 }
-EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
 
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
@@ -1327,7 +1266,6 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_stopped_hw_queue(hctx, async);
 }
-EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
 static void blk_mq_run_work_fn(struct work_struct *work)
 {
@@ -1352,24 +1290,6 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 }
 
 
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
-{
-       if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
-               return;
-
-       /*
-        * Stop the hw queue, then modify currently delayed work.
-        * This should prevent us from running the queue prematurely.
-        * Mark the queue as auto-clearing STOPPED when it runs.
-        */
-       blk_mq_stop_hw_queue(hctx);
-       set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
-       kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-                                       &hctx->run_work,
-                                       msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_mq_delay_queue);
-
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                            struct request *rq,
                                            bool at_head)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c4516d2a2d2c..9bc51d43155f 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -25,6 +25,8 @@ struct blk_mq_ctx {
        struct kobject          kobj;
 } ____cacheline_aligned_in_smp;
 
+void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8694fb39cd80..a539e38f5a76 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -243,18 +243,10 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs
 void blk_mq_complete_request(struct request *rq);
 
 bool blk_mq_queue_stopped(struct request_queue *q);
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
-void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
-void blk_mq_stop_hw_queues(struct request_queue *q);
-void blk_mq_start_hw_queues(struct request_queue *q);
-void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_quiesce_queue(struct request_queue *q);
 void blk_mq_unquiesce_queue(struct request_queue *q);
-void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
-- 
2.9.4

Reply via email to