Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer

2018-11-15 Thread Christoph Hellwig
On Wed, Nov 14, 2018 at 06:56:41PM +0100, Ulf Hansson wrote:
> > } else {
> > @@ -397,6 +397,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
> > int ret;
> >
> > mq->card = card;
> > +   mq->lock = lock;
> 
> Unless I am mistaken, it seems like the "lock" can also be removed as
> an in-parameter to mmc_init_queue() - and instead do the
> spin_lock_init() in here.
> 
> Moreover, that means we should drop the "lock" from the struct
> mmc_blk_data and instead move it to struct mmc_queue (rather than
> having a pointer to it).

Which sounds like a sensible idea indeed, I'll look into it.
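(For reference, a minimal sketch of what that follow-up could look like,
assuming the lock ends up embedded in struct mmc_queue and initialized
inside mmc_init_queue(); apart from the names taken from the patch below,
this is illustrative, not the actual tree:)

    #include <linux/spinlock.h>

    struct mmc_card;

    struct mmc_queue {
            struct mmc_card *card;
            spinlock_t lock;        /* embedded; replaces the pointer into
                                       mmc_blk_data */
            /* ... */
    };

    /* the "lock" parameter from the patch below goes away entirely */
    int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                       const char *subname)
    {
            mq->card = card;
            spin_lock_init(&mq->lock);  /* instead of "mq->lock = lock;" */
            /* ... rest of the queue setup unchanged ... */
            return 0;
    }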


Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer

2018-11-14 Thread Hannes Reinecke

On 11/14/18 5:02 PM, Christoph Hellwig wrote:

mmc uses the block layer struct request_queue queue_lock pointer to
indirect its own lock to the mmc_queue structure, given that the original
lock isn't reachable outside of block.c.  Add a lock pointer to struct
mmc_queue instead and stop overriding the block layer lock, which protects
fields entirely separate from the mmc use.

Signed-off-by: Christoph Hellwig 
---
  drivers/mmc/core/block.c | 22 ++
  drivers/mmc/core/queue.c | 26 +-
  drivers/mmc/core/queue.h |  1 +
  3 files changed, 24 insertions(+), 25 deletions(-)


Reviewed-by: Hannes Reinecke 

Cheers,

Hannes
--
Dr. Hannes Reinecke            Teamlead Storage & Networking
h...@suse.de   +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, J. Guild, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)
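
(In essence, the patch replaces one level of indirection through the block
layer with a direct pointer; a simplified before/after sketch, with the
"before" assignment paraphrased from the pre-patch tree rather than quoted
from it:)

    /* before: the lock lives in mmc_blk_data and is handed to the block
     * layer, so mmc code reaches it through the request queue */
    mq->queue->queue_lock = lock;
    spin_lock_irqsave(req->q->queue_lock, flags);

    /* after: struct mmc_queue carries its own pointer and q->queue_lock
     * is left alone, protecting only the block layer's own fields */
    mq->lock = lock;
    spin_lock_irqsave(mq->lock, flags);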


Re: [PATCH 14/16] mmc: stop abusing the request queue_lock pointer

2018-11-14 Thread Ulf Hansson
On 14 November 2018 at 17:02, Christoph Hellwig  wrote:
> mmc uses the block layer struct request_queue queue_lock pointer to
> indirect its own lock to the mmc_queue structure, given that the original
> lock isn't reachable outside of block.c.  Add a lock pointer to struct
> mmc_queue instead and stop overriding the block layer lock, which protects
> fields entirely separate from the mmc use.
>
> Signed-off-by: Christoph Hellwig 
> ---
>  drivers/mmc/core/block.c | 22 ++
>  drivers/mmc/core/queue.c | 26 +-
>  drivers/mmc/core/queue.h |  1 +
>  3 files changed, 24 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index 27606e1382e5..70ec465beb69 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -1483,7 +1483,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
> blk_mq_end_request(req, BLK_STS_OK);
> }
>
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);
>
> mq->in_flight[mmc_issue_type(mq, req)] -= 1;
>
> @@ -1491,7 +1491,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
>
> mmc_cqe_check_busy(mq);
>
> -   spin_unlock_irqrestore(q->queue_lock, flags);
> +   spin_unlock_irqrestore(mq->lock, flags);
>
> if (!mq->cqe_busy)
> blk_mq_run_hw_queues(q, true);
> @@ -1988,17 +1988,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
>
>  static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
>  {
> -   struct request_queue *q = req->q;
> unsigned long flags;
> bool put_card;
>
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);
>
> mq->in_flight[mmc_issue_type(mq, req)] -= 1;
>
> put_card = (mmc_tot_in_flight(mq) == 0);
>
> -   spin_unlock_irqrestore(q->queue_lock, flags);
> +   spin_unlock_irqrestore(mq->lock, flags);
>
> if (put_card)
> mmc_put_card(mq->card, &mq->ctx);
> @@ -2094,11 +2093,11 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
>  * request does not need to wait (although it does need to
>  * complete complete_req first).
>  */
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);
> mq->complete_req = req;
> mq->rw_wait = false;
> waiting = mq->waiting;
> -   spin_unlock_irqrestore(q->queue_lock, flags);
> +   spin_unlock_irqrestore(mq->lock, flags);
>
> /*
>  * If 'waiting' then the waiting task will complete this
> @@ -2117,10 +2116,10 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
> /* Take the recovery path for errors or urgent background operations */
> if (mmc_blk_rq_error(&mqrq->brq) ||
> mmc_blk_urgent_bkops_needed(mq, mqrq)) {
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);
> mq->recovery_needed = true;
> mq->recovery_req = req;
> -   spin_unlock_irqrestore(q->queue_lock, flags);
> +   spin_unlock_irqrestore(mq->lock, flags);
> wake_up(&mq->wait);
> schedule_work(&mq->recovery_work);
> return;
> @@ -2136,7 +2135,6 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
>
>  static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
>  {
> -   struct request_queue *q = mq->queue;
> unsigned long flags;
> bool done;
>
> @@ -2144,7 +2142,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
>  * Wait while there is another request in progress, but not if recovery
>  * is needed. Also indicate whether there is a request waiting to start.
>  */
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);
> if (mq->recovery_needed) {
> *err = -EBUSY;
> done = true;
> @@ -2152,7 +2150,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
> done = !mq->rw_wait;
> }
> mq->waiting = !done;
> -   spin_unlock_irqrestore(q->queue_lock, flags);
> +   spin_unlock_irqrestore(mq->lock, flags);
>
> return done;
>  }
> diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> index 37617fb1f9de..ac6a5245275a 100644
> --- a/drivers/mmc/core/queue.c
> +++ b/drivers/mmc/core/queue.c
> @@ -89,9 +89,9 @@ void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
> struct mmc_queue *mq = q->queuedata;
> unsigned long flags;
>
> -   spin_lock_irqsave(q->queue_lock, flags);
> +   spin_lock_irqsave(mq->lock, flags);

[PATCH 14/16] mmc: stop abusing the request queue_lock pointer

2018-11-14 Thread Christoph Hellwig
mmc uses the block layer struct request_queue queue_lock pointer to
indirect its own lock to the mmc_queue structure, given that the original
lock isn't reachable outside of block.c.  Add a lock pointer to struct
mmc_queue instead and stop overriding the block layer lock, which protects
fields entirely separate from the mmc use.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/core/block.c | 22 ++
 drivers/mmc/core/queue.c | 26 +-
 drivers/mmc/core/queue.h |  1 +
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 27606e1382e5..70ec465beb69 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1483,7 +1483,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_end_request(req, BLK_STS_OK);
}
 
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
 
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
 
@@ -1491,7 +1491,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 
mmc_cqe_check_busy(mq);
 
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
 
if (!mq->cqe_busy)
blk_mq_run_hw_queues(q, true);
@@ -1988,17 +1988,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
 
 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
 {
-   struct request_queue *q = req->q;
unsigned long flags;
bool put_card;
 
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
 
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
 
put_card = (mmc_tot_in_flight(mq) == 0);
 
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
 
if (put_card)
mmc_put_card(mq->card, &mq->ctx);
@@ -2094,11 +2093,11 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 * request does not need to wait (although it does need to
 * complete complete_req first).
 */
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
mq->complete_req = req;
mq->rw_wait = false;
waiting = mq->waiting;
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
 
/*
 * If 'waiting' then the waiting task will complete this
@@ -2117,10 +2116,10 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
/* Take the recovery path for errors or urgent background operations */
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
wake_up(&mq->wait);
schedule_work(&mq->recovery_work);
return;
@@ -2136,7 +2135,6 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 {
-   struct request_queue *q = mq->queue;
unsigned long flags;
bool done;
 
@@ -2144,7 +2142,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 * Wait while there is another request in progress, but not if recovery
 * is needed. Also indicate whether there is a request waiting to start.
 */
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
if (mq->recovery_needed) {
*err = -EBUSY;
done = true;
@@ -2152,7 +2150,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
done = !mq->rw_wait;
}
mq->waiting = !done;
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
 
return done;
 }
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 37617fb1f9de..ac6a5245275a 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -89,9 +89,9 @@ void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
 
-   spin_lock_irqsave(q->queue_lock, flags);
+   spin_lock_irqsave(mq->lock, flags);
__mmc_cqe_recovery_notifier(mq);
-   spin_unlock_irqrestore(q->queue_lock, flags);
+   spin_unlock_irqrestore(mq->lock, flags);
 }
 
 static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -128,14 +128,14 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)