Re: [PATCH V5 12/14] block: mq-deadline: Introduce zone locking support

2017-10-01 Thread Damien Le Moal
Bart,

On Mon, 2017-09-25 at 22:00 +0000, Bart Van Assche wrote:
> On Mon, 2017-09-25 at 15:14 +0900, Damien Le Moal wrote:
> > +static inline bool deadline_request_needs_zone_wlock(struct deadline_data *dd,
> > +						      struct request *rq)
> > +{
> > +
> > +   if (!dd->zones_wlock)
> > +   return false;
> > +
> > +   if (blk_rq_is_passthrough(rq))
> > +   return false;
> > +
> > +   switch (req_op(rq)) {
> > +   case REQ_OP_WRITE_ZEROES:
> > +   case REQ_OP_WRITE_SAME:
> > +   case REQ_OP_WRITE:
> > +   return blk_rq_zone_is_seq(rq);
> > +   default:
> > +   return false;
> > +   }
> 
> If anyone ever adds a new write request type it will be easy to overlook
> this function. Should the 'default' case be left out and should all request
> types be mentioned in the switch/case statement such that the compiler will
> issue a warning if a new request operation type is added to enum req_opf?

I tried, but that does not work: the switch-case needs either a default case
or a return statement after it. Otherwise, I get a compilation warning
("control reaches end of non-void function").
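
For reference, a minimal sketch of what the default-less variant would have
to look like (illustration only, not the actual patch code; only a few of
the req_opf values are spelled out):

static inline bool deadline_request_needs_zone_wlock(struct deadline_data *dd,
						     struct request *rq)
{
	if (!dd->zones_wlock || blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	case REQ_OP_READ:
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
		/* ... plus a case for every other req_opf value ... */
		return false;
	}

	/* Still required, or gcc warns all the same. */
	return false;
}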

> > +/*
> > + * Abuse the elv.priv[0] pointer to indicate if a request has write
> > + * locked its target zone. Only write requests to a zoned block device
> > + * can own a zone write lock.
> > + */
> > +#define RQ_ZONE_WLOCKED	((void *)1UL)
> > +static inline void deadline_set_request_zone_wlock(struct request *rq)
> > +{
> > +   rq->elv.priv[0] = RQ_ZONE_WLOCKED;
> > +}
> > +
> > +#define RQ_ZONE_NO_WLOCK   ((void *)0UL)
> > +static inline void deadline_clear_request_zone_wlock(struct request *rq)
> > +{
> > +   rq->elv.priv[0] = RQ_ZONE_NO_WLOCK;
> > +}
> 
> Should an enumeration type be introduced for RQ_ZONE_WLOCKED and
> RQ_ZONE_NO_WLOCK?

Sure. Added in V6.
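
Roughly along these lines (a sketch only; the actual names and placement in
V6 may differ):

enum rq_zone_wlock_state {
	RQ_ZONE_NO_WLOCK	= 0,
	RQ_ZONE_WLOCKED		= 1,
};

static inline void deadline_set_request_zone_wlock(struct request *rq)
{
	rq->elv.priv[0] = (void *)RQ_ZONE_WLOCKED;
}

static inline void deadline_clear_request_zone_wlock(struct request *rq)
{
	rq->elv.priv[0] = (void *)RQ_ZONE_NO_WLOCK;
}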

> > +/*
> > + * Write lock the target zone of a write request.
> > + */
> > +static void deadline_wlock_zone(struct deadline_data *dd,
> > +   struct request *rq)
> > +{
> > +   unsigned int zno = blk_rq_zone_no(rq);
> > +
> > +   WARN_ON_ONCE(deadline_request_has_zone_wlock(rq));
> > +   WARN_ON_ONCE(test_and_set_bit(zno, dd->zones_wlock));
> > +   deadline_set_request_zone_wlock(rq);
> > +}
> > +
> > +/*
> > + * Write unlock the target zone of a write request.
> > + */
> > +static void deadline_wunlock_zone(struct deadline_data *dd,
> > + struct request *rq)
> > +{
> > +   unsigned int zno = blk_rq_zone_no(rq);
> > +   unsigned long flags;
> > +
> > +   spin_lock_irqsave(&dd->zone_lock, flags);
> > +
> > +   WARN_ON_ONCE(!test_and_clear_bit(zno, dd->zones_wlock));
> > +   deadline_clear_request_zone_wlock(rq);
> > +
> > +   spin_unlock_irqrestore(&dd->zone_lock, flags);
> > +}
> 
> Why does deadline_wunlock_zone() protect modifications with dd->zone_lock
> but deadline_wlock_zone() not? If this code is correct, please add a
> lockdep_assert_held() statement in the first function.

Yes, that was a little confusing. In V6, I moved the introduction of the
zone_lock spinlock to the patch where it is actually needed, that is, the
patch following this one. I also added more comments, in both the commit
message and the code, to explain why the spinlock is needed.

> > +/*
> > + * Test the write lock state of the target zone of a write request.
> > + */
> > +static inline bool deadline_zone_is_wlocked(struct deadline_data *dd,
> > +   struct request *rq)
> > +{
> > +   unsigned int zno = blk_rq_zone_no(rq);
> > +
> > +   return test_bit(zno, dd->zones_wlock);
> > +}
> 
> Do we really need the local variable 'zno'?

No, we don't. Fixed.
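
The helper then collapses to (presumably, in V6):

static inline bool deadline_zone_is_wlocked(struct deadline_data *dd,
					    struct request *rq)
{
	return test_bit(blk_rq_zone_no(rq), dd->zones_wlock);
}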

Best regards.

-- 
Damien Le Moal
Western Digital

Re: [PATCH V5 12/14] block: mq-deadline: Introduce zone locking support

2017-09-25 Thread Bart Van Assche
On Mon, 2017-09-25 at 15:14 +0900, Damien Le Moal wrote:
> +static inline bool deadline_request_needs_zone_wlock(struct deadline_data *dd,
> +						      struct request *rq)
> +{
> +
> + if (!dd->zones_wlock)
> + return false;
> +
> + if (blk_rq_is_passthrough(rq))
> + return false;
> +
> + switch (req_op(rq)) {
> + case REQ_OP_WRITE_ZEROES:
> + case REQ_OP_WRITE_SAME:
> + case REQ_OP_WRITE:
> + return blk_rq_zone_is_seq(rq);
> + default:
> + return false;
> + }

If anyone ever adds a new write request type it will be easy to overlook this
function. Should the 'default' case be left out and should all request types
be mentioned in the switch/case statement such that the compiler will issue a
warning if a new request operation type is added to enum req_opf?

> +/*
> + * Abuse the elv.priv[0] pointer to indicate if a request has write
> + * locked its target zone. Only write requests to a zoned block device
> + * can own a zone write lock.
> + */
> +#define RQ_ZONE_WLOCKED  ((void *)1UL)
> +static inline void deadline_set_request_zone_wlock(struct request *rq)
> +{
> + rq->elv.priv[0] = RQ_ZONE_WLOCKED;
> +}
> +
> +#define RQ_ZONE_NO_WLOCK ((void *)0UL)
> +static inline void deadline_clear_request_zone_wlock(struct request *rq)
> +{
> + rq->elv.priv[0] = RQ_ZONE_NO_WLOCK;
> +}

Should an enumeration type be introduced for RQ_ZONE_WLOCKED and 
RQ_ZONE_NO_WLOCK?

> +/*
> + * Write lock the target zone of a write request.
> + */
> +static void deadline_wlock_zone(struct deadline_data *dd,
> + struct request *rq)
> +{
> + unsigned int zno = blk_rq_zone_no(rq);
> +
> + WARN_ON_ONCE(deadline_request_has_zone_wlock(rq));
> + WARN_ON_ONCE(test_and_set_bit(zno, dd->zones_wlock));
> + deadline_set_request_zone_wlock(rq);
> +}
> +
> +/*
> + * Write unlock the target zone of a write request.
> + */
> +static void deadline_wunlock_zone(struct deadline_data *dd,
> +   struct request *rq)
> +{
> + unsigned int zno = blk_rq_zone_no(rq);
> + unsigned long flags;
> +
> + spin_lock_irqsave(&dd->zone_lock, flags);
> +
> + WARN_ON_ONCE(!test_and_clear_bit(zno, dd->zones_wlock));
> + deadline_clear_request_zone_wlock(rq);
> +
> + spin_unlock_irqrestore(&dd->zone_lock, flags);
> +}

Why does deadline_wunlock_zone() protect modifications with dd->zone_lock but
deadline_wlock_zone() not? If this code is correct, please add a
lockdep_assert_held() statement in the first function.

> +/*
> + * Test the write lock state of the target zone of a write request.
> + */
> +static inline bool deadline_zone_is_wlocked(struct deadline_data *dd,
> + struct request *rq)
> +{
> + unsigned int zno = blk_rq_zone_no(rq);
> +
> + return test_bit(zno, dd->zones_wlock);
> +}

Do we really need the local variable 'zno'?

> +/*
> + * For zoned block devices, write unlock the target zone of
> + * completed write requests.
> + */
> +static void dd_completed_request(struct request *rq)
> +{
> +

Please leave out the blank line at the start of this function.

Thanks,

Bart.

[PATCH V5 12/14] block: mq-deadline: Introduce zone locking support

2017-09-24 Thread Damien Le Moal
For a write request to a zoned block device, lock the request's target
zone upon request dispatch. The zone is unlocked either when the request
completes or when the request is requeued (inserted).

To indicate that a request has locked its target zone, use the first
pointer of the request elevator private data to store the value
RQ_ZONE_WLOCKED. Testing for this value allows a quick decision in
dd_insert_request() and dd_completed_request() regarding the need for
unlocking the target zone of a request.

Signed-off-by: Damien Le Moal 
---
 block/mq-deadline.c | 114 
 1 file changed, 114 insertions(+)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 296880e2471f..186c32099845 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -178,6 +178,93 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
 }
 
 /*
+ * Return true if a request is a write request that needs zone
+ * write locking.
+ */
+static inline bool deadline_request_needs_zone_wlock(struct deadline_data *dd,
+						     struct request *rq)
+{
+
+   if (!dd->zones_wlock)
+   return false;
+
+   if (blk_rq_is_passthrough(rq))
+   return false;
+
+   switch (req_op(rq)) {
+   case REQ_OP_WRITE_ZEROES:
+   case REQ_OP_WRITE_SAME:
+   case REQ_OP_WRITE:
+   return blk_rq_zone_is_seq(rq);
+   default:
+   return false;
+   }
+}
+
+/*
+ * Abuse the elv.priv[0] pointer to indicate if a request has write
+ * locked its target zone. Only write requests to a zoned block device
+ * can own a zone write lock.
+ */
+#define RQ_ZONE_WLOCKED	((void *)1UL)
+static inline void deadline_set_request_zone_wlock(struct request *rq)
+{
+   rq->elv.priv[0] = RQ_ZONE_WLOCKED;
+}
+
+#define RQ_ZONE_NO_WLOCK   ((void *)0UL)
+static inline void deadline_clear_request_zone_wlock(struct request *rq)
+{
+   rq->elv.priv[0] = RQ_ZONE_NO_WLOCK;
+}
+
+static inline bool deadline_request_has_zone_wlock(struct request *rq)
+{
+   return rq->elv.priv[0] == RQ_ZONE_WLOCKED;
+}
+
+/*
+ * Write lock the target zone of a write request.
+ */
+static void deadline_wlock_zone(struct deadline_data *dd,
+   struct request *rq)
+{
+   unsigned int zno = blk_rq_zone_no(rq);
+
+   WARN_ON_ONCE(deadline_request_has_zone_wlock(rq));
+   WARN_ON_ONCE(test_and_set_bit(zno, dd->zones_wlock));
+   deadline_set_request_zone_wlock(rq);
+}
+
+/*
+ * Write unlock the target zone of a write request.
+ */
+static void deadline_wunlock_zone(struct deadline_data *dd,
+ struct request *rq)
+{
+   unsigned int zno = blk_rq_zone_no(rq);
+   unsigned long flags;
+
+   spin_lock_irqsave(&dd->zone_lock, flags);
+
+   WARN_ON_ONCE(!test_and_clear_bit(zno, dd->zones_wlock));
+   deadline_clear_request_zone_wlock(rq);
+
+   spin_unlock_irqrestore(&dd->zone_lock, flags);
+}
+
+/*
+ * Test the write lock state of the target zone of a write request.
+ */
+static inline bool deadline_zone_is_wlocked(struct deadline_data *dd,
+   struct request *rq)
+{
+   unsigned int zno = blk_rq_zone_no(rq);
+
+   return test_bit(zno, dd->zones_wlock);
+}
+
+/*
  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
  */
@@ -316,6 +403,11 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
dd->batching++;
deadline_move_request(dd, rq);
 done:
+   /*
+* If the request needs its target zone locked, do it.
+*/
+   if (deadline_request_needs_zone_wlock(dd, rq))
+   deadline_wlock_zone(dd, rq);
rq->rq_flags |= RQF_STARTED;
return rq;
 }
@@ -466,6 +558,13 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq);
 
+   /*
+* This may be a requeue of a request that has locked its
+* target zone. If this is the case, release the request zone lock.
+*/
+   if (deadline_request_has_zone_wlock(rq))
+   deadline_wunlock_zone(dd, rq);
+
if (blk_mq_sched_try_insert_merge(q, rq))
return;
 
@@ -510,6 +609,20 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock);
 }
 
+/*
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests.
+ */
+static void dd_completed_request(struct request *rq)
+{
+
+   if (deadline_request_has_zone_wlock(rq)) {
+   struct deadline_data *dd = rq->q->elevator->elevator_data;
+
+   deadline_wunlock_zone(dd, rq);
+   }
+}
+
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)