On 21 August 2018 at 14:05, Adrian Hunter <adrian.hun...@intel.com> wrote:
> The mmc block driver does not support parallel dispatch of requests. In
> normal circumstances all requests are funneled through a single work
> item anyway, so parallel dispatch never happens. However, it can happen
> if there is no elevator.
>
> Fix that by detecting whether a dispatch is already in progress and
> returning busy (BLK_STS_RESOURCE) in that case.
>
> Fixes: 81196976ed94 ("mmc: block: Add blk-mq support")
> Cc: sta...@vger.kernel.org
> Signed-off-by: Adrian Hunter <adrian.hun...@intel.com>
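
For reference, a minimal userspace sketch of the guard this patch adds:
take the queue lock, return a "resource busy" status if another dispatch
already holds the busy flag, otherwise set the flag, issue synchronously
and clear it again. The names below (fake_queue, fake_dispatch,
FAKE_STS_RESOURCE) and the pthread locking are illustrative stand-ins,
not the driver code; the real change is the mq->busy handling in the
diff further down.

/* Simplified analogue of the busy-flag guard, not the actual driver code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum fake_status { FAKE_STS_OK, FAKE_STS_RESOURCE };

struct fake_queue {
	pthread_mutex_t lock;
	bool busy;              /* mirrors mq->busy: one dispatch at a time */
};

static enum fake_status fake_dispatch(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->busy) {
		/* Another dispatch is in flight: ask the caller to retry later. */
		pthread_mutex_unlock(&q->lock);
		return FAKE_STS_RESOURCE;
	}
	q->busy = true;
	pthread_mutex_unlock(&q->lock);

	/* ... issue the request here (synchronously in this sketch) ... */

	pthread_mutex_lock(&q->lock);
	q->busy = false;        /* dispatch finished, allow the next one */
	pthread_mutex_unlock(&q->lock);

	return FAKE_STS_OK;
}

int main(void)
{
	struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.busy = false,
	};

	/* Back-to-back dispatches from a single thread both succeed. */
	printf("dispatch 1: %d\n", fake_dispatch(&q));
	printf("dispatch 2: %d\n", fake_dispatch(&q));
	return 0;
}

(build with: cc -pthread sketch.c, where sketch.c is whatever file name you use)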

Thanks, applied for fixes!

Kind regards
Uffe

> ---
>  drivers/mmc/core/queue.c | 12 +++++++-----
>  drivers/mmc/core/queue.h |  1 +
>  2 files changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> index 648eb6743ed5..6edffeed9953 100644
> --- a/drivers/mmc/core/queue.c
> +++ b/drivers/mmc/core/queue.c
> @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
>         mmc_exit_request(mq->queue, req);
>  }
>
> -/*
> - * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
> - * will not be dispatched in parallel.
> - */
>  static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>                                     const struct blk_mq_queue_data *bd)
>  {
> @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>
>         spin_lock_irq(q->queue_lock);
>
> -       if (mq->recovery_needed) {
> +       if (mq->recovery_needed || mq->busy) {
>                 spin_unlock_irq(q->queue_lock);
>                 return BLK_STS_RESOURCE;
>         }
> @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>                 break;
>         }
>
> +       /* Parallel dispatch of requests is not supported at the moment */
> +       mq->busy = true;
> +
>         mq->in_flight[issue_type] += 1;
>         get_card = (mmc_tot_in_flight(mq) == 1);
>         cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
> @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
>                 mq->in_flight[issue_type] -= 1;
>                 if (mmc_tot_in_flight(mq) == 0)
>                         put_card = true;
> +               mq->busy = false;
>                 spin_unlock_irq(q->queue_lock);
>                 if (put_card)
>                         mmc_put_card(card, &mq->ctx);
> +       } else {
> +               WRITE_ONCE(mq->busy, false);
>         }
>
>         return ret;
> diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
> index 17e59d50b496..9bf3c9245075 100644
> --- a/drivers/mmc/core/queue.h
> +++ b/drivers/mmc/core/queue.h
> @@ -81,6 +81,7 @@ struct mmc_queue {
>         unsigned int            cqe_busy;
>  #define MMC_CQE_DCMD_BUSY      BIT(0)
>  #define MMC_CQE_QUEUE_FULL     BIT(1)
> +       bool                    busy;
>         bool                    use_cqe;
>         bool                    recovery_needed;
>         bool                    in_recovery;
> --
> 2.17.1
>
