Factor out the common queue setup code into a new helper, mmc_setup_queue(), so that it can also be used with blk-mq.

Signed-off-by: Adrian Hunter <adrian.hun...@intel.com>
---
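
[Not part of the commit: a minimal sketch of how a future blk-mq
initialisation path could reuse the new helper. The function name
mmc_mq_init_queue() and the tag-set comments below are assumptions for
illustration only, not code from this patch.]

static int mmc_mq_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	mq->card = card;

	/*
	 * A blk-mq conversion would allocate its tag set and request
	 * queue here (e.g. blk_mq_alloc_tag_set() / blk_mq_init_queue())
	 * before applying the queue limits.
	 */

	/* Reuse the factored-out helper for the shared limits setup. */
	mmc_setup_queue(mq, card);

	return 0;
}
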
 drivers/mmc/core/queue.c | 53 ++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 22 deletions(-)

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index affa7370ba82..14e9de9c783c 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -223,6 +223,36 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
        mq_rq->sg = NULL;
 }
 
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+       struct mmc_host *host = card->host;
+       u64 limit = BLK_BOUNCE_HIGH;
+
+       if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+               limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+       if (mmc_can_erase(card))
+               mmc_queue_setup_discard(mq->queue, card);
+
+       card->bouncesz = mmc_queue_calc_bouncesz(host);
+       if (card->bouncesz) {
+               blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+               blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+               blk_queue_max_segment_size(mq->queue, card->bouncesz);
+       } else {
+               blk_queue_bounce_limit(mq->queue, limit);
+               blk_queue_max_hw_sectors(mq->queue,
+                       min(host->max_blk_count, host->max_req_size / 512));
+               blk_queue_max_segments(mq->queue, host->max_segs);
+               blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+       }
+
+       /* Initialize thread_sem even if it is not used */
+       sema_init(&mq->thread_sem, 1);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -236,12 +266,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
 {
        struct mmc_host *host = card->host;
-       u64 limit = BLK_BOUNCE_HIGH;
        int ret = -ENOMEM;
 
-       if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-               limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
        mq->card = card;
        mq->queue = blk_alloc_queue(GFP_KERNEL);
        if (!mq->queue)
@@ -260,25 +286,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        }
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
-       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-       queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-       if (mmc_can_erase(card))
-               mmc_queue_setup_discard(mq->queue, card);
 
-       card->bouncesz = mmc_queue_calc_bouncesz(host);
-       if (card->bouncesz) {
-               blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-               blk_queue_max_segment_size(mq->queue, card->bouncesz);
-       } else {
-               blk_queue_bounce_limit(mq->queue, limit);
-               blk_queue_max_hw_sectors(mq->queue,
-                       min(host->max_blk_count, host->max_req_size / 512));
-               blk_queue_max_segments(mq->queue, host->max_segs);
-               blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-       }
-
-       sema_init(&mq->thread_sem, 1);
+       mmc_setup_queue(mq, card);
 
        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");
-- 
1.9.1
