Re: [PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-09 Thread Juergen Gross
On 02/12/16 07:15, Juergen Gross wrote:
> Instead of requesting a new slot on the ring to the backend early, do
> so only after all has been setup for the request to be sent. This
> makes error handling easier as we don't need to undo the request id
> allocation and ring slot allocation.
> 
> Suggested-by: Jan Beulich 
> Signed-off-by: Juergen Gross 


Committed to xen/tip.git for-linus-4.10


Juergen
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-08 Thread Boris Ostrovsky



On 12/02/2016 01:15 AM, Juergen Gross wrote:

Instead of requesting a new slot on the ring to the backend early, do
so only after all has been setup for the request to be sent. This
makes error handling easier as we don't need to undo the request id
allocation and ring slot allocation.

Suggested-by: Jan Beulich 
Signed-off-by: Juergen Gross 


Reviewed-by: Boris Ostrovsky 

(although it would be good to have someone familiar with this code look 
at this as well).

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-05 Thread Juergen Gross
On 05/12/16 16:32, Boris Ostrovsky wrote:
> On 12/02/2016 01:15 AM, Juergen Gross wrote:
>>  
>> -static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info 
>> *info)
>> +static int scsifront_do_request(struct vscsifrnt_info *info,
>> +struct vscsifrnt_shadow *shadow)
>>  {
>>  struct vscsiif_front_ring *ring = &(info->ring);
>>  struct vscsiif_request *ring_req;
>> +struct scsi_cmnd *sc = shadow->sc;
>>  uint32_t id;
>> +int i, notify;
>> +
>> +if (RING_FULL(&info->ring))
>> +return -EBUSY;
>>  
>>  id = scsifront_get_rqid(info);  /* use id in response */
>>  if (id >= VSCSIIF_MAX_REQS)
>> -return NULL;
>> +return -EBUSY;
>> +
>> +info->shadow[id] = shadow;
>> +shadow->rqid = id;
>>  
>>  ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
>> -
>>  ring->req_prod_pvt++;
>>  
>> -ring_req->rqid = (uint16_t)id;
>> +ring_req->rqid= id;
>> +ring_req->act = shadow->act;
>> +ring_req->ref_rqid= shadow->ref_rqid;
>> +ring_req->nr_segments = shadow->nr_segments;
> 
> Shouldn't req_prod_pvt be incremented after you've copied everything? (I
> realize that there is no error until everything has been copied but still.)

That doesn't really matter as it is private.

>> @@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info 
>> *info,
>>  }
>>  
>>  if (seg_grants)
>> -ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
>> +shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
> 
> Are we guaranteed that seg_grants is not going to have VSCSIIF_SG_GRANT
> bit set?

Absolutely, yes. Can't be larger than VSCSIIF_SG_TABLESIZE which is 26.


Juergen
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-05 Thread Boris Ostrovsky
On 12/02/2016 01:15 AM, Juergen Gross wrote:
>  
> -static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
> +static int scsifront_do_request(struct vscsifrnt_info *info,
> + struct vscsifrnt_shadow *shadow)
>  {
>   struct vscsiif_front_ring *ring = &(info->ring);
>   struct vscsiif_request *ring_req;
> + struct scsi_cmnd *sc = shadow->sc;
>   uint32_t id;
> + int i, notify;
> +
> + if (RING_FULL(&info->ring))
> + return -EBUSY;
>  
>   id = scsifront_get_rqid(info);  /* use id in response */
>   if (id >= VSCSIIF_MAX_REQS)
> - return NULL;
> + return -EBUSY;
> +
> + info->shadow[id] = shadow;
> + shadow->rqid = id;
>  
>   ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
> -
>   ring->req_prod_pvt++;
>  
> - ring_req->rqid = (uint16_t)id;
> + ring_req->rqid= id;
> + ring_req->act = shadow->act;
> + ring_req->ref_rqid= shadow->ref_rqid;
> + ring_req->nr_segments = shadow->nr_segments;

Shouldn't req_prod_pvt be incremented after you've copied everything? (I
realize that there is no error until everything has been copied but still.)



> @@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info 
> *info,
>   }
>  
>   if (seg_grants)
> - ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
> + shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;

Are we guaranteed that seg_grants is not going to have VSCSIIF_SG_GRANT
bit set?

-boris



--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-01 Thread Juergen Gross
Instead of requesting a new slot on the ring to the backend early, do
so only after all has been setup for the request to be sent. This
makes error handling easier as we don't need to undo the request id
allocation and ring slot allocation.

Suggested-by: Jan Beulich 
Signed-off-by: Juergen Gross 
---
Aargh! Need more coffee! Resend with corrected mail address for Martin Petersen
---
 drivers/scsi/xen-scsifront.c | 190 +++
 1 file changed, 84 insertions(+), 106 deletions(-)

diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index c01316c..9aa1fe1 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
 struct vscsifrnt_shadow {
/* command between backend and frontend */
unsigned char act;
+   uint8_t nr_segments;
uint16_t rqid;
+   uint16_t ref_rqid;
 
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg;  /* scatter/gather elements */
+   struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
/* Do reset or abort function. */
wait_queue_head_t wq_reset; /* reset work queue   */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info 
*info, uint32_t id)
scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+   struct vscsifrnt_shadow *shadow)
 {
struct vscsiif_front_ring *ring = &(info->ring);
struct vscsiif_request *ring_req;
+   struct scsi_cmnd *sc = shadow->sc;
uint32_t id;
+   int i, notify;
+
   if (RING_FULL(&info->ring))
+   return -EBUSY;
 
id = scsifront_get_rqid(info);  /* use id in response */
if (id >= VSCSIIF_MAX_REQS)
-   return NULL;
+   return -EBUSY;
+
+   info->shadow[id] = shadow;
+   shadow->rqid = id;
 
ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
-
ring->req_prod_pvt++;
 
-   ring_req->rqid = (uint16_t)id;
+   ring_req->rqid= id;
+   ring_req->act = shadow->act;
+   ring_req->ref_rqid= shadow->ref_rqid;
+   ring_req->nr_segments = shadow->nr_segments;
 
-   return ring_req;
-}
+   ring_req->id  = sc->device->id;
+   ring_req->lun = sc->device->lun;
+   ring_req->channel = sc->device->channel;
+   ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-   struct vscsiif_front_ring *ring = &(info->ring);
-   int notify;
+   BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+   memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+   ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+   ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+   for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+   ring_req->seg[i] = shadow->seg[i];
 
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
+
+   return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
 {
-   struct vscsifrnt_shadow *s = info->shadow[id];
int i;
 
-   if (s->sc->sc_data_direction == DMA_NONE)
+   if (shadow->sc->sc_data_direction == DMA_NONE)
return;
 
-   for (i = 0; i < s->nr_grants; i++) {
-   if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+   for (i = 0; i < shadow->nr_grants; i++) {
+   if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 "grant still in use by backend\n");
BUG();
}
-   gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+   gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
 
-   kfree(s->sg);
+   kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
   struct vscsiif_response *ring_rsp)
 {
+   struct vscsifrnt_shadow *shadow;
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
 
id = ring_rsp->rqid;
-   sc = info->shadow[id]->sc;
+   shadow = info->shadow[id];
+   sc = shadow->sc;
 
BUG_ON(sc == NULL);
 
-   scsifront_gnttab_done(info, id);
+   scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
 
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void 

[PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-01 Thread Juergen Gross
Instead of requesting a new slot on the ring to the backend early, do
so only after all has been setup for the request to be sent. This
makes error handling easier as we don't need to undo the request id
allocation and ring slot allocation.

Suggested-by: Jan Beulich 
Signed-off-by: Juergen Gross 
---
Resend with corrected mail address for Martin Petersen
---
 drivers/scsi/xen-scsifront.c | 190 +++
 1 file changed, 84 insertions(+), 106 deletions(-)

diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index c01316c..9aa1fe1 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
 struct vscsifrnt_shadow {
/* command between backend and frontend */
unsigned char act;
+   uint8_t nr_segments;
uint16_t rqid;
+   uint16_t ref_rqid;
 
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg;  /* scatter/gather elements */
+   struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
/* Do reset or abort function. */
wait_queue_head_t wq_reset; /* reset work queue   */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info 
*info, uint32_t id)
scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+   struct vscsifrnt_shadow *shadow)
 {
struct vscsiif_front_ring *ring = &(info->ring);
struct vscsiif_request *ring_req;
+   struct scsi_cmnd *sc = shadow->sc;
uint32_t id;
+   int i, notify;
+
   if (RING_FULL(&info->ring))
+   return -EBUSY;
 
id = scsifront_get_rqid(info);  /* use id in response */
if (id >= VSCSIIF_MAX_REQS)
-   return NULL;
+   return -EBUSY;
+
+   info->shadow[id] = shadow;
+   shadow->rqid = id;
 
ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
-
ring->req_prod_pvt++;
 
-   ring_req->rqid = (uint16_t)id;
+   ring_req->rqid= id;
+   ring_req->act = shadow->act;
+   ring_req->ref_rqid= shadow->ref_rqid;
+   ring_req->nr_segments = shadow->nr_segments;
 
-   return ring_req;
-}
+   ring_req->id  = sc->device->id;
+   ring_req->lun = sc->device->lun;
+   ring_req->channel = sc->device->channel;
+   ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-   struct vscsiif_front_ring *ring = &(info->ring);
-   int notify;
+   BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+   memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+   ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+   ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+   for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+   ring_req->seg[i] = shadow->seg[i];
 
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
+
+   return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
 {
-   struct vscsifrnt_shadow *s = info->shadow[id];
int i;
 
-   if (s->sc->sc_data_direction == DMA_NONE)
+   if (shadow->sc->sc_data_direction == DMA_NONE)
return;
 
-   for (i = 0; i < s->nr_grants; i++) {
-   if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+   for (i = 0; i < shadow->nr_grants; i++) {
+   if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 "grant still in use by backend\n");
BUG();
}
-   gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+   gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
 
-   kfree(s->sg);
+   kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
   struct vscsiif_response *ring_rsp)
 {
+   struct vscsifrnt_shadow *shadow;
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
 
id = ring_rsp->rqid;
-   sc = info->shadow[id]->sc;
+   shadow = info->shadow[id];
+   sc = shadow->sc;
 
BUG_ON(sc == NULL);
 
-   scsifront_gnttab_done(info, id);
+   scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
 
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct 

[PATCH v2] xen/scsifront: don't request a slot on the ring until request is ready

2016-12-01 Thread Juergen Gross
Instead of requesting a new slot on the ring to the backend early, do
so only after all has been setup for the request to be sent. This
makes error handling easier as we don't need to undo the request id
allocation and ring slot allocation.

Suggested-by: Jan Beulich 
Signed-off-by: Juergen Gross 
---
 drivers/scsi/xen-scsifront.c | 190 +++
 1 file changed, 84 insertions(+), 106 deletions(-)

diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index c01316c..9aa1fe1 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
 struct vscsifrnt_shadow {
/* command between backend and frontend */
unsigned char act;
+   uint8_t nr_segments;
uint16_t rqid;
+   uint16_t ref_rqid;
 
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg;  /* scatter/gather elements */
+   struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
/* Do reset or abort function. */
wait_queue_head_t wq_reset; /* reset work queue   */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info 
*info, uint32_t id)
scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+   struct vscsifrnt_shadow *shadow)
 {
struct vscsiif_front_ring *ring = &(info->ring);
struct vscsiif_request *ring_req;
+   struct scsi_cmnd *sc = shadow->sc;
uint32_t id;
+   int i, notify;
+
   if (RING_FULL(&info->ring))
+   return -EBUSY;
 
id = scsifront_get_rqid(info);  /* use id in response */
if (id >= VSCSIIF_MAX_REQS)
-   return NULL;
+   return -EBUSY;
+
+   info->shadow[id] = shadow;
+   shadow->rqid = id;
 
ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
-
ring->req_prod_pvt++;
 
-   ring_req->rqid = (uint16_t)id;
+   ring_req->rqid= id;
+   ring_req->act = shadow->act;
+   ring_req->ref_rqid= shadow->ref_rqid;
+   ring_req->nr_segments = shadow->nr_segments;
 
-   return ring_req;
-}
+   ring_req->id  = sc->device->id;
+   ring_req->lun = sc->device->lun;
+   ring_req->channel = sc->device->channel;
+   ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-   struct vscsiif_front_ring *ring = &(info->ring);
-   int notify;
+   BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+   memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+   ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+   ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+   for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+   ring_req->seg[i] = shadow->seg[i];
 
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
+
+   return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+ struct vscsifrnt_shadow *shadow)
 {
-   struct vscsifrnt_shadow *s = info->shadow[id];
int i;
 
-   if (s->sc->sc_data_direction == DMA_NONE)
+   if (shadow->sc->sc_data_direction == DMA_NONE)
return;
 
-   for (i = 0; i < s->nr_grants; i++) {
-   if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+   for (i = 0; i < shadow->nr_grants; i++) {
+   if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 "grant still in use by backend\n");
BUG();
}
-   gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+   gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
 
-   kfree(s->sg);
+   kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
   struct vscsiif_response *ring_rsp)
 {
+   struct vscsifrnt_shadow *shadow;
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
 
id = ring_rsp->rqid;
-   sc = info->shadow[id]->sc;
+   shadow = info->shadow[id];
+   sc = shadow->sc;
 
BUG_ON(sc == NULL);
 
-   scsifront_gnttab_done(info, id);
+   scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
 
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info 
*info)
 
 static int map_data_for_request(struct