Re: [PATCH 1/2] target_core_alua: Referrals infrastructure

2013-10-17 Thread Hannes Reinecke
On 10/17/2013 12:28 AM, Nicholas A. Bellinger wrote:
 On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
 Add infrastructure for referrals.

 Signed-off-by: Hannes Reinecke h...@suse.de
 ---
  drivers/target/target_core_alua.c | 151 
 ++
  drivers/target/target_core_alua.h |   4 +-
  drivers/target/target_core_configfs.c |  12 ++-
  drivers/target/target_core_device.c   |   2 +
  drivers/target/target_core_sbc.c  |   5 +-
  drivers/target/target_core_spc.c  |  20 +
  include/scsi/scsi.h   |   1 +
  include/target/target_core_base.h |  18 
  8 files changed, 209 insertions(+), 4 deletions(-)

 diff --git a/drivers/target/target_core_alua.c 
 b/drivers/target/target_core_alua.c
 index 166bee6..8f66146 100644
 --- a/drivers/target/target_core_alua.c
 +++ b/drivers/target/target_core_alua.c
 @@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
  struct t10_alua_lu_gp *default_lu_gp;
  
  /*
 + * REPORT REFERRALS
 + *
 + * See sbc3r35 section 5.23
 + */
 +sense_reason_t
 +target_emulate_report_referrals(struct se_cmd *cmd)
 +{
 +struct se_device *dev = cmd-se_dev;
 +struct t10_alua_lba_map *map;
 +struct t10_alua_lba_map_member *map_mem;
 +unsigned char *buf;
 +u32 rd_len = 0, off;
 +
 +if (cmd->data_length < 4) {
 +pr_warn("REPORT REFERRALS allocation length %u too"
 + " small\n", cmd->data_length);
 +return TCM_INVALID_CDB_FIELD;
 +}
 +
 +buf = transport_kmap_data_sg(cmd);
 +if (!buf)
 +return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 +
 +off = 4;
 +spin_lock(dev-t10_alua.lba_map_lock);
 +if (list_empty(dev-t10_alua.lba_map_list)) {
 +spin_unlock(dev-t10_alua.lba_map_lock);
 +transport_kunmap_data_sg(cmd);
 +
 +return TCM_UNSUPPORTED_SCSI_OPCODE;
 +}
 +
 +list_for_each_entry(map, dev-t10_alua.lba_map_list,
 +lba_map_list) {
 +int desc_num = off + 3;
 +int pg_num;
 +
 +off += 4;
 +put_unaligned_be64(map-lba_map_first_lba, buf[off]);
 +off += 8;
 +put_unaligned_be64(map-lba_map_last_lba, buf[off]);
 +off += 8;
 +rd_len += 20;
 +pg_num = 0;
 +list_for_each_entry(map_mem, map-lba_map_mem_list,
 +lba_map_mem_list) {
 +buf[off++] = map_mem-lba_map_mem_alua_state  0x0f;
 +off++;
 +buf[off++] = (map_mem-lba_map_mem_alua_pg_id  8)  
 0xff;
 +buf[off++] = (map_mem-lba_map_mem_alua_pg_id  0xff);
 +rd_len += 4;
 +pg_num++;
 +}
 +buf[desc_num] = pg_num;
 +}
 +spin_unlock(dev-t10_alua.lba_map_lock);
 +
 
 For both of these list walks, there needs to be a check against offset
 vs. -data_length to know when the available payload length has been
 exhausted..
 
Right. Will be fixing it up.

[ .. ]
 diff --git a/drivers/target/target_core_spc.c 
 b/drivers/target/target_core_spc.c
 index e39d442..282b5bb 100644
 --- a/drivers/target/target_core_spc.c
 +++ b/drivers/target/target_core_spc.c
 @@ -476,6 +476,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char 
 *buf)
  /* If WriteCache emulation is enabled, set V_SUP */
  if (spc_check_dev_wce(dev))
  buf[6] = 0x01;
 +/* If an LBA map is present set R_SUP */
 +spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
 +if (!list_empty(&dev->t10_alua.lba_map_list))
 +buf[8] = 0x10;
 +spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
  return 0;
  }
 
 Is there ever a case where R_SUP should be reported, but lba_map_list is
 empty..?
 
Not that I can see. If R_SUP is set it means the 'REPORT REFERRALS'
command is supported. And 'REPORT REFERRALS' without a map is pretty much
pointless.

 How about a se_device attribute called 'emulate_referrals' to determine
 when to report R_SUP..?  Otherwise, perhaps using the se_lun -> se_port
 -> sep_alua_tg_pt_gp_mem -> tg_pt_gp provided bit for
 tg_pt_gp_alua_supported_states instead..?
 
I was thinking about the very same thing, but then figured it was
easier to equal R_SUP with !list_empty(lba_map_list) instead of
having a separate flag.
Or crawling indirections just to find the very same information ...

  
 @@ -627,6 +632,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char 
 *buf)
  return 0;
  }
  
 +/* Referrals VPD page */
 +static sense_reason_t
 +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
 +{
 +struct se_device *dev = cmd->se_dev;
 +
 +buf[0] = dev->transport->get_device_type(dev);
 +buf[3] = 0x0c;
 +put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
 +put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
 +
 
 Typo..  Offset for byte 12 should be the lba_map_segment_multiplier..
 
Oops ...

Will be 

[PATCH 1/2] target_core_alua: Referrals infrastructure

2013-10-16 Thread Hannes Reinecke
Add infrastructure for referrals.

Signed-off-by: Hannes Reinecke h...@suse.de
---
 drivers/target/target_core_alua.c | 151 ++
 drivers/target/target_core_alua.h |   4 +-
 drivers/target/target_core_configfs.c |  12 ++-
 drivers/target/target_core_device.c   |   2 +
 drivers/target/target_core_sbc.c  |   5 +-
 drivers/target/target_core_spc.c  |  20 +
 include/scsi/scsi.h   |   1 +
 include/target/target_core_base.h |  18 
 8 files changed, 209 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_alua.c 
b/drivers/target/target_core_alua.c
index 166bee6..8f66146 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
 struct t10_alua_lu_gp *default_lu_gp;
 
 /*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+   struct se_device *dev = cmd->se_dev;
+   struct t10_alua_lba_map *map;
+   struct t10_alua_lba_map_member *map_mem;
+   unsigned char *buf;
+   u32 rd_len = 0, off;
+
+   if (cmd->data_length < 4) {
+   pr_warn("REPORT REFERRALS allocation length %u too"
+   " small\n", cmd->data_length);
+   return TCM_INVALID_CDB_FIELD;
+   }
+
+   buf = transport_kmap_data_sg(cmd);
+   if (!buf)
+   return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+   off = 4;
+   spin_lock(&dev->t10_alua.lba_map_lock);
+   if (list_empty(&dev->t10_alua.lba_map_list)) {
+   spin_unlock(&dev->t10_alua.lba_map_lock);
+   transport_kunmap_data_sg(cmd);
+
+   return TCM_UNSUPPORTED_SCSI_OPCODE;
+   }
+
+   list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+   lba_map_list) {
+   int desc_num = off + 3;
+   int pg_num;
+
+   off += 4;
+   put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+   off += 8;
+   put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+   off += 8;
+   rd_len += 20;
+   pg_num = 0;
+   list_for_each_entry(map_mem, &map->lba_map_mem_list,
+   lba_map_mem_list) {
+   buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f;
+   off++;
+   buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff;
+   buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff);
+   rd_len += 4;
+   pg_num++;
+   }
+   buf[desc_num] = pg_num;
+   }
+   spin_unlock(&dev->t10_alua.lba_map_lock);
+
+   /*
+    * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+    */
+   put_unaligned_be16(rd_len, &buf[2]);
+
+   transport_kunmap_data_sg(cmd);
+
+   target_complete_cmd(cmd, GOOD);
+   return 0;
+}
+
+/*
  * REPORT_TARGET_PORT_GROUPS
  *
  * See spc4r17 section 6.27
@@ -389,6 +458,80 @@ static inline int core_alua_state_nonoptimized(
return 0;
 }
 
+static inline int core_alua_state_lba_dependent(
+   struct se_cmd *cmd,
+   struct t10_alua_tg_pt_gp *tg_pt_gp,
+   u8 *alua_ascq)
+{
+   struct se_device *dev = cmd->se_dev;
+   u32 segment_size, segment_mult, sectors;
+   u64 lba;
+
+   /* Only need to check for cdb actually containing LBAs */
+   if (!cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)
+   return 0;
+
+   spin_lock(&dev->t10_alua.lba_map_lock);
+   segment_size = dev->t10_alua.lba_map_segment_size;
+   segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+   sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+   lba = cmd->t_task_lba;
+   while (lba < cmd->t_task_lba + sectors) {
+   struct t10_alua_lba_map *cur_map = NULL, *map;
+   struct t10_alua_lba_map_member *map_mem;
+
+   list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+   lba_map_list) {
+   u64 start_lba, last_lba;
+   u64 first_lba = map->lba_map_first_lba;
+
+   if (segment_mult) {
+   start_lba = lba % (segment_size * segment_mult);
+   last_lba = first_lba + segment_size - 1;
+   if (start_lba >= first_lba &&
+   start_lba <= last_lba) {
+   lba += segment_size;
+   cur_map = map;
+   break;
+   }
+   } else {
+   last_lba = map->lba_map_last_lba;
+   if (lba >= first_lba && lba <= last_lba) {
+

Re: [PATCH 1/2] target_core_alua: Referrals infrastructure

2013-10-16 Thread Nicholas A. Bellinger
On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
 Add infrastructure for referrals.
 
 Signed-off-by: Hannes Reinecke h...@suse.de
 ---
  drivers/target/target_core_alua.c | 151 
 ++
  drivers/target/target_core_alua.h |   4 +-
  drivers/target/target_core_configfs.c |  12 ++-
  drivers/target/target_core_device.c   |   2 +
  drivers/target/target_core_sbc.c  |   5 +-
  drivers/target/target_core_spc.c  |  20 +
  include/scsi/scsi.h   |   1 +
  include/target/target_core_base.h |  18 
  8 files changed, 209 insertions(+), 4 deletions(-)
 
 diff --git a/drivers/target/target_core_alua.c 
 b/drivers/target/target_core_alua.c
 index 166bee6..8f66146 100644
 --- a/drivers/target/target_core_alua.c
 +++ b/drivers/target/target_core_alua.c
 @@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
  struct t10_alua_lu_gp *default_lu_gp;
  
  /*
 + * REPORT REFERRALS
 + *
 + * See sbc3r35 section 5.23
 + */
 +sense_reason_t
 +target_emulate_report_referrals(struct se_cmd *cmd)
 +{
 + struct se_device *dev = cmd-se_dev;
 + struct t10_alua_lba_map *map;
 + struct t10_alua_lba_map_member *map_mem;
 + unsigned char *buf;
 + u32 rd_len = 0, off;
 +
 + if (cmd->data_length < 4) {
 + pr_warn("REPORT REFERRALS allocation length %u too"
 +  " small\n", cmd->data_length);
 + return TCM_INVALID_CDB_FIELD;
 + }
 +
 + buf = transport_kmap_data_sg(cmd);
 + if (!buf)
 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 +
 + off = 4;
 + spin_lock(dev-t10_alua.lba_map_lock);
 + if (list_empty(dev-t10_alua.lba_map_list)) {
 + spin_unlock(dev-t10_alua.lba_map_lock);
 + transport_kunmap_data_sg(cmd);
 +
 + return TCM_UNSUPPORTED_SCSI_OPCODE;
 + }
 +
 + list_for_each_entry(map, dev-t10_alua.lba_map_list,
 + lba_map_list) {
 + int desc_num = off + 3;
 + int pg_num;
 +
 + off += 4;
 + put_unaligned_be64(map-lba_map_first_lba, buf[off]);
 + off += 8;
 + put_unaligned_be64(map-lba_map_last_lba, buf[off]);
 + off += 8;
 + rd_len += 20;
 + pg_num = 0;
 + list_for_each_entry(map_mem, map-lba_map_mem_list,
 + lba_map_mem_list) {
 + buf[off++] = map_mem-lba_map_mem_alua_state  0x0f;
 + off++;
 + buf[off++] = (map_mem-lba_map_mem_alua_pg_id  8)  
 0xff;
 + buf[off++] = (map_mem-lba_map_mem_alua_pg_id  0xff);
 + rd_len += 4;
 + pg_num++;
 + }
 + buf[desc_num] = pg_num;
 + }
 + spin_unlock(dev-t10_alua.lba_map_lock);
 +

For both of these list walks, there needs to be a check against offset
vs. -data_length to know when the available payload length has been
exhausted..

 + /*
 +  * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 +  */
 + put_unaligned_be16(rd_len, buf[2]);
 +
 + transport_kunmap_data_sg(cmd);
 +
 + target_complete_cmd(cmd, GOOD);
 + return 0;
 +}
 +
 +/*
   * REPORT_TARGET_PORT_GROUPS
   *
   * See spc4r17 section 6.27
 @@ -389,6 +458,80 @@ static inline int core_alua_state_nonoptimized(
   return 0;
  }
  
 +static inline int core_alua_state_lba_dependent(
 + struct se_cmd *cmd,
 + struct t10_alua_tg_pt_gp *tg_pt_gp,
 + u8 *alua_ascq)
 +{
 + struct se_device *dev = cmd-se_dev;
 + u32 segment_size, segment_mult, sectors;
 + u64 lba;
 +
 + /* Only need to check for cdb actually containing LBAs */
 + if (!cmd-se_cmd_flags  SCF_SCSI_DATA_CDB)
 + return 0;
 +
 + spin_lock(dev-t10_alua.lba_map_lock);
 + segment_size = dev-t10_alua.lba_map_segment_size;
 + segment_mult = dev-t10_alua.lba_map_segment_multiplier;
 + sectors = cmd-data_length / dev-dev_attrib.block_size;
 +
 + lba = cmd-t_task_lba;
 + while (lba  cmd-t_task_lba + sectors) {
 + struct t10_alua_lba_map *cur_map = NULL, *map;
 + struct t10_alua_lba_map_member *map_mem;
 +
 + list_for_each_entry(map, dev-t10_alua.lba_map_list,
 + lba_map_list) {
 + u64 start_lba, last_lba;
 + u64 first_lba = map-lba_map_first_lba;
 +
 + if (segment_mult) {
 + start_lba = lba % (segment_size * segment_mult);
 + last_lba = first_lba + segment_size - 1;
 + if (start_lba = first_lba 
 + start_lba = last_lba) {
 + lba += segment_size;
 + cur_map = map;
 + break;
 + }
 +