Replace sg_get_rq_mark() with sg_find_srp_by_id() and
sg_get_ready_srp(). Add sg_chk_mmap() to check flags and
reserve buffer available for mmap() based requests. Add
sg_copy_sense() and sg_rec_v3_state() which is just
refactoring. Add sg_calc_rq_dur() and sg_get_dur() in
preparation for optional nanosecond duration timing.

Signed-off-by: Douglas Gilbert <dgilb...@interlog.com>
---
 drivers/scsi/sg.c | 293 +++++++++++++++++++++++++++++++---------------
 1 file changed, 200 insertions(+), 93 deletions(-)

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4e6f6fb2a54e..7f3a4b937a5a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -158,16 +158,20 @@ struct sg_fd;
 
 struct sg_request {    /* SG_MAX_QUEUE requests outstanding per file */
        struct list_head entry; /* list entry */
-       struct sg_fd *parentfp; /* NULL -> not in use */
        struct sg_scatter_hold data;    /* hold buffer, perhaps scatter list */
        struct sg_io_hdr header;  /* scsi command+info, see <scsi/sg.h> */
        u8 sense_b[SCSI_SENSE_BUFFERSIZE];
+       u32 duration;           /* cmd duration in milliseconds */
        char res_used;          /* 1 -> using reserve buffer, 0 -> not ... */
        char orphan;            /* 1 -> drop on sight, 0 -> normal */
        char sg_io_owned;       /* 1 -> packet belongs to SG_IO */
        /* done protected by rq_list_lock */
        char done;              /* 0->before bh, 1->before read, 2->read */
        atomic_t rq_st;         /* request state, holds a enum sg_rq_state */
+       u8 cmd_opcode;          /* first byte of SCSI cdb */
+       u64 start_ns;           /* starting point of command duration calc */
+       unsigned long frq_bm[1];        /* see SG_FRQ_* defines above */
+       struct sg_fd *parentfp; /* pointer to owning fd, even when on fl */
        struct request *rq;     /* released in sg_rq_end_io(), bio kept */
        struct bio *bio;        /* kept until this req -->SG_RS_INACTIVE */
        struct execute_work ew_orph;    /* harvest orphan request */
@@ -233,7 +237,7 @@ static ssize_t sg_submit(struct sg_fd *sfp, struct file *filp,
                         const char __user *buf, size_t count, bool blocking,
                         bool read_only, bool sg_io_owned,
                         struct sg_request **o_srp);
-static int sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwp);
+static int sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwrp);
 static int sg_rd_append(struct sg_request *srp, void __user *outp,
                        int num_xfer);
 static void sg_remove_sgat(struct sg_fd *sfp, struct sg_scatter_hold *schp);
@@ -243,6 +247,7 @@ static void sg_link_reserve(struct sg_fd *sfp, struct sg_request *srp,
 static void sg_unlink_reserve(struct sg_fd *sfp, struct sg_request *srp);
 static struct sg_fd *sg_add_sfp(struct sg_device *sdp);
 static void sg_remove_sfp(struct kref *);
+static struct sg_request *sg_find_srp_by_id(struct sg_fd *sfp, int pack_id);
 static struct sg_request *sg_add_request(struct sg_fd *sfp);
 static void sg_deact_request(struct sg_fd *sfp, struct sg_request *srp);
 static struct sg_device *sg_get_dev(int dev);
@@ -454,7 +459,8 @@ sg_open(struct inode *inode, struct file *filp)
        sfp = sg_add_sfp(sdp);
        if (IS_ERR(sfp)) {
                res = PTR_ERR(sfp);
-               goto out_undo; }
+               goto out_undo;
+       }
 
        filp->private_data = sfp;
        atomic_inc(&sdp->open_cnt);
@@ -522,7 +528,6 @@ sg_release(struct inode *inode, struct file *filp)
 static ssize_t
 sg_write(struct file *filp, const char __user *p, size_t count, loff_t *ppos)
 {
-       bool blocking = !(filp->f_flags & O_NONBLOCK);
        int mxsize, cmd_size, input_size, res;
        u8 opcode;
        struct sg_device *sdp;
@@ -619,21 +624,19 @@ sg_write(struct file *filp, const char __user *p, size_t count, loff_t *ppos)
                return -EFAULT;
        /*
         * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
-        * but is is possible that the app intended SG_DXFER_TO_DEV, because there
-        * is a non-zero input_size, so emit a warning.
+        * but it is possible that the app intended SG_DXFER_TO_DEV, because
+        * there is a non-zero input_size, so emit a warning.
         */
        if (h3p->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
-               printk_ratelimited(KERN_WARNING
-                                  "sg_write: data in/out %d/%d bytes "
-                                  "for SCSI command 0x%x-- guessing "
-                                  "data in;\n   program %s not setting "
-                                  "count and/or reply_len properly\n",
-                                  ohp->reply_len - (int)SZ_SG_HEADER,
-                                  input_size, (unsigned int) cmnd[0],
-                                  current->comm);
+               printk_ratelimited
+                       (KERN_WARNING
+                        "%s: data in/out %d/%d bytes for SCSI command 0x%x-- guessing data in;\n"
+                        "   program %s not setting count and/or reply_len properly\n",
+                        __func__, ohp->reply_len - (int)SZ_SG_HEADER,
+                        input_size, (unsigned int)cmnd[0], current->comm);
        }
        cwr.timeout = sfp->timeout;
-       cwr.blocking = blocking;
+       cwr.blocking = !(filp->f_flags & O_NONBLOCK);
        cwr.srp = srp;
        cwr.cmnd = cmnd;
        res = sg_common_write(sfp, &cwr);
@@ -661,6 +664,18 @@ sg_fetch_cmnd(struct file *filp, struct sg_fd *sfp, const u8 __user *u_cdbp,
        return 0;
 }
 
+static inline int
+sg_chk_mmap(struct sg_fd *sfp, int rq_flags, int len)
+{
+       if (len > sfp->reserve.buflen)
+               return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
+       if (rq_flags & SG_FLAG_DIRECT_IO)
+               return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
+       if (sfp->res_in_use)
+               return -EBUSY;  /* reserve buffer already being used */
+       return 0;
+}
+
 static ssize_t
 sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
          size_t count, bool blocking, bool read_only, bool sg_io_owned,
@@ -692,17 +707,10 @@ sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
                return -ENOSYS;
        }
        if (hp->flags & SG_FLAG_MMAP_IO) {
-               if (hp->dxfer_len > sfp->reserve.buflen) {
-                       sg_deact_request(sfp, srp);
-                       return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
-               }
-               if (hp->flags & SG_FLAG_DIRECT_IO) {
+               res = sg_chk_mmap(sfp, hp->flags, hp->dxfer_len);
+               if (res) {
                        sg_deact_request(sfp, srp);
-                       return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
-               }
-               if (sfp->res_in_use) {
-                       sg_deact_request(sfp, srp);
-                       return -EBUSY;  /* reserve buffer already being used */
+                       return res;
                }
        }
        ul_timeout = msecs_to_jiffies(srp->header.timeout);
@@ -724,6 +732,12 @@ sg_submit(struct sg_fd *sfp, struct file *filp, const char __user *buf,
        return count;
 }
 
+/*
+ * All writes and submits converge on this function to launch the SCSI
+ * command/request (via blk_execute_rq_nowait). The sg_request object
+ * holding the request just issued is cwrp->srp. Returns 0 on success
+ * or a negated errno value.
+ */
 static int
 sg_common_write(struct sg_fd *sfp, struct sg_comm_wr_t *cwrp)
 {
@@ -802,37 +816,45 @@ sg_rstate_chg(struct sg_request *srp, enum sg_rq_state old_st,
  * sg_ctl_ioreceive(). wait_event_interruptible will return if this one
  * returns true (or an event like a signal (e.g. control-C) occurs).
  */
-
-static struct sg_request *
-sg_get_rq_mark(struct sg_fd *sfp, int pack_id)
+static inline bool
+sg_get_ready_srp(struct sg_fd *sfp, struct sg_request **srpp, int pack_id)
 {
-       struct sg_request *resp;
-       unsigned long iflags;
+       struct sg_request *srp;
 
-       spin_lock_irqsave(&sfp->rq_list_lock, iflags);
-       list_for_each_entry(resp, &sfp->rq_list, entry) {
-               /* look for requests that are ready + not SG_IO owned */
-               if ((resp->done == 1) && (!resp->sg_io_owned) &&
-                   ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
-                       resp->done = 2; /* guard against other readers */
-                       spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-                       return resp;
-               }
+       if (unlikely(SG_IS_DETACHING(sfp->parentdp))) {
+               *srpp = NULL;
+               return true;
        }
-       spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-       return NULL;
+       srp = sg_find_srp_by_id(sfp, pack_id);
+       *srpp = srp;
+       return !!srp;
 }
 
+/*
+ * Returns number of bytes copied to user space provided sense buffer or
+ * negated errno value.
+ */
 static int
-srp_done(struct sg_fd *sfp, struct sg_request *srp)
+sg_copy_sense(struct sg_request *srp)
 {
-       unsigned long flags;
-       int ret;
+       int len = 0;
+       struct sg_io_hdr *hp = &srp->header;
 
-       spin_lock_irqsave(&sfp->rq_list_lock, flags);
-       ret = srp->done;
-       spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
-       return ret;
+       /* If need be, copy the sense buffer to the user space */
+       if ((CHECK_CONDITION & hp->masked_status) ||
+           (DRIVER_SENSE & hp->driver_status)) {
+               int sb_len = SCSI_SENSE_BUFFERSIZE;
+
+               sb_len = (hp->mx_sb_len > sb_len) ? sb_len :
+                                                   hp->mx_sb_len;
+               /* Additional sense length field */
+               len = 8 + (int)srp->sense_b[7];
+               len = (len > sb_len) ? sb_len : len;
+               if (copy_to_user(hp->sbp, srp->sense_b, len))
+                       return -EFAULT;
+               hp->sb_len_wr = len;
+       }
+       return len;
 }
 
 #if IS_ENABLED(CONFIG_SCSI_LOGGING)
@@ -861,37 +883,32 @@ sg_rep_rq_state_fail(struct sg_fd *sfp, enum sg_rq_state exp_old_st,
 }
 #endif
 
-static ssize_t
-sg_v3_receive(struct sg_fd *sfp, char __user *buf, struct sg_request *srp)
+static int
+sg_rec_v3_state(struct sg_fd *sfp, struct sg_request *srp)
 {
-       struct sg_io_hdr *hp = &srp->header;
        int err = 0;
-       int len;
-
-       hp->sb_len_wr = 0;
-       if ((hp->mx_sb_len > 0) && hp->sbp) {
-               if ((CHECK_CONDITION & hp->masked_status) ||
-                   (DRIVER_SENSE & hp->driver_status)) {
-                       int sb_len = SCSI_SENSE_BUFFERSIZE;
-
-                       sb_len = (hp->mx_sb_len > sb_len) ? sb_len :
-                                                           hp->mx_sb_len;
-                       /* Additional sense length field */
-                       len = 8 + (int) srp->sense_b[7];
-                       len = (len > sb_len) ? sb_len : len;
-                       if (copy_to_user(hp->sbp, srp->sense_b, len)) {
-                               err = -EFAULT;
-                               goto err_out;
-                       }
-                       hp->sb_len_wr = len;
-               }
-       }
+       int sb_len_wr;
+
+       sb_len_wr = sg_copy_sense(srp);
+       if (sb_len_wr < 0)
+               return sb_len_wr;
+       if (unlikely(SG_IS_DETACHING(sfp->parentdp)))
+               err = -ENODEV;
+       return err;
+}
+
+static int
+sg_v3_receive(struct sg_fd *sfp, struct sg_request *srp, void __user *p)
+{
+       int err = 0;
+       struct sg_io_hdr *hp = &srp->header;
+
+       SG_LOG(3, sfp, "%s: srp=0x%p\n", __func__, srp);
+       err = sg_rec_v3_state(sfp, srp);
        if (hp->masked_status || hp->host_status || hp->driver_status)
                hp->info |= SG_INFO_CHECK;
-       if (copy_to_user(buf, hp, SZ_SG_IO_HDR))
+       if (copy_to_user(p, hp, SZ_SG_IO_HDR))
                err = -EFAULT;
-
-err_out:
        sg_finish_scsi_blk_rq(srp);
        sg_deact_request(sfp, srp);
        return err ? err : 0;
@@ -970,16 +987,22 @@ sg_rd_v1v2(void __user *buf, int count, struct sg_fd *sfp,
        return res;
 }
 
+/*
+ * This is the read(2) system call entry point (see sg_fops) for this driver.
+ * Accepts v1, v2 or v3 type headers (not v4). Returns count or negated
+ * errno; if count is 0 then v3: returns -EINVAL; v1+v2: 0 when no other
+ * error detected or -EIO.
+ */
 static ssize_t
 sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
 {
        bool could_be_v3;
        bool non_block = !!(filp->f_flags & O_NONBLOCK);
-       int want_id = -1;
+       int want_id = SG_PACK_ID_WILDCARD;
        int hlen, ret;
-       struct sg_device *sdp;
+       struct sg_device *sdp = NULL;
        struct sg_fd *sfp;
-       struct sg_request *srp;
+       struct sg_request *srp = NULL;
        struct sg_header *h2p = NULL;
        struct sg_io_hdr a_sg_io_hdr;
 
@@ -994,7 +1017,7 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
        sfp = filp->private_data;
        sdp = sfp->parentdp;
        SG_LOG(3, sfp, "%s: read() count=%d\n", __func__, (int)count);
-       ret = sg_allow_if_err_recovery(sdp, false);
+       ret = sg_allow_if_err_recovery(sdp, non_block);
        if (ret)
                return ret;
 
@@ -1013,17 +1036,13 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
                if (h2p->reply_len < 0 && could_be_v3) {
                        struct sg_io_hdr *v3_hdr = (struct sg_io_hdr *)h2p;
 
-                       if (likely(v3_hdr->interface_id == 'S')) {
+                       if (v3_hdr->interface_id == 'S') {
                                struct sg_io_hdr __user *h3_up;
 
                                h3_up = (struct sg_io_hdr __user *)p;
                                ret = get_user(want_id, &h3_up->pack_id);
-                               if (unlikely(ret))
+                               if (ret)
                                        return ret;
-                       } else if (v3_hdr->interface_id == 'Q') {
-                               pr_info_once("sg: %s: v4 interface%s here\n",
-                                            __func__, " disallowed");
-                               return -EPERM;
                        } else {
                                return -EPERM;
                        }
@@ -1031,27 +1050,27 @@ sg_read(struct file *filp, char __user *p, size_t count, loff_t *ppos)
                        want_id = h2p->pack_id;
                }
        }
-       srp = sg_get_rq_mark(sfp, want_id);
-       if (!srp) {             /* now wait on packet to arrive */
+       srp = sg_find_srp_by_id(sfp, want_id);
+       if (!srp) {     /* nothing available so wait for packet or detach */
                if (SG_IS_DETACHING(sdp))
                        return -ENODEV;
                if (non_block) /* O_NONBLOCK or v3::flags & SGV4_FLAG_IMMED */
                        return -EAGAIN;
-               ret = wait_event_interruptible
-                               (sfp->read_wait,
-                                (SG_IS_DETACHING(sdp) ||
-                                 (srp = sg_get_rq_mark(sfp, want_id))));
+               ret = wait_event_interruptible(sfp->read_wait,
+                                              sg_get_ready_srp(sfp, &srp,
+                                                               want_id));
                if (SG_IS_DETACHING(sdp))
                        return -ENODEV;
                if (ret)        /* -ERESTARTSYS as signal hit process */
                        return ret;
+               /* otherwise srp should be valid */
        }
        if (srp->header.interface_id == '\0') {
                ret = sg_rd_v1v2(p, (int)count, sfp, srp);
        } else {
                if ((int)count < SZ_SG_IO_HDR)
                        return -EINVAL;
-               ret = sg_v3_receive(sfp, p, srp);
+               ret = sg_v3_receive(sfp, srp, p);
        }
        if (ret < 0)
                SG_LOG(1, sfp, "%s: negated errno: %d\n", __func__, ret);
@@ -1101,6 +1120,52 @@ sg_calc_sgat_param(struct sg_device *sdp)
        sdp->max_sgat_sz = sz;
 }
 
+static u32
+sg_calc_rq_dur(const struct sg_request *srp)
+{
+       u64 ts0 = srp->start_ns;
+       u64 now_ts;
+       s64 diff;
+
+       if (ts0 == 0)
+               return 0;
+       if (unlikely(ts0 == U64_MAX))   /* _prior_ to issuing req */
+               return 999999999;       /* eye catching */
+       now_ts = ktime_get_boottime_ns();
+       if (unlikely(ts0 > now_ts))
+               return 999999998;
+       /* unlikely req duration will exceed 2**32 milliseconds */
+       diff = ktime_ms_delta(now_ts, ts0);
+       return (diff > (s64)U32_MAX) ? 3999999999U : (u32)diff;
+}
+
+/* Return of U32_MAX means srp is inactive or in slave waiting state */
+static u32
+sg_get_dur(struct sg_request *srp, const enum sg_rq_state *sr_stp,
+          bool *is_durp)
+{
+       bool is_dur = false;
+       u32 res = U32_MAX;
+
+       switch (sr_stp ? *sr_stp : atomic_read(&srp->rq_st)) {
+       case SG_RS_INFLIGHT:
+       case SG_RS_BUSY:
+               res = sg_calc_rq_dur(srp);
+               break;
+       case SG_RS_AWAIT_RD:
+       case SG_RS_DONE_RD:
+               res = srp->duration;
+               is_dur = true;  /* completion has occurred, timing finished */
+               break;
+       case SG_RS_INACTIVE:
+       default:
+               break;
+       }
+       if (is_durp)
+               *is_durp = is_dur;
+       return res;
+}
+
 static void
 sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
 {
@@ -1117,6 +1182,7 @@ sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
                        srp->header.masked_status &
                        srp->header.host_status &
                        srp->header.driver_status;
+               rinfo[val].duration = sg_get_dur(srp, NULL, NULL); /* dummy */
                if (srp->done)
                        rinfo[val].duration =
                                srp->header.duration;
@@ -1134,6 +1200,18 @@ sg_fill_request_table(struct sg_fd *sfp, struct sg_req_info *rinfo)
        }
 }
 
+static int
+srp_done(struct sg_fd *sfp, struct sg_request *srp)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&sfp->rq_list_lock, flags);
+       ret = srp->done;
+       spin_unlock_irqrestore(&sfp->rq_list_lock, flags);
+       return ret;
+}
+
 /*
  * Handles ioctl(SG_IO) for blocking (sync) usage of v3 or v4 interface.
  * Returns 0 on success else a negated errno.
@@ -1161,7 +1239,7 @@ sg_ctl_sg_io(struct file *filp, struct sg_device *sdp, struct sg_fd *sfp,
        if (srp->done) {
                srp->done = 2;
                spin_unlock_irq(&sfp->rq_list_lock);
-               res = sg_v3_receive(sfp, p, srp);
+               res = sg_v3_receive(sfp, srp, p);
                return (res < 0) ? res : 0;
        }
        srp->orphan = 1;
@@ -1361,7 +1439,9 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                        return result;
                if (val > SG_MAX_CDB_SIZE)
                        return -ENOMEM;
-               sfp->next_cmd_len = (val > 0) ? val : 0;
+               mutex_lock(&sfp->f_mutex);
+               sfp->next_cmd_len = max_t(int, val, 0);
+               mutex_unlock(&sfp->f_mutex);
                return 0;
        case SG_GET_ACCESS_COUNT:
                SG_LOG(3, sfp, "%s:    SG_GET_ACCESS_COUNT\n", __func__);
@@ -2306,6 +2386,33 @@ sg_rd_append(struct sg_request *srp, void __user *outp, int num_xfer)
        return res;
 }
 
+/*
+ * If there are multiple requests outstanding, the speed of this function is
+ * important. SG_PACK_ID_WILDCARD is -1 and that case is typically
+ * the fast path. Returns a pointer to the (first) matching sg_request
+ * or NULL. If found, the request's 'done' marker is set to 2 to guard
+ * against other readers claiming the same request.
+ */
+static struct sg_request *
+sg_find_srp_by_id(struct sg_fd *sfp, int pack_id)
+{
+       unsigned long iflags;
+       struct sg_request *resp;
+
+       spin_lock_irqsave(&sfp->rq_list_lock, iflags);
+       list_for_each_entry(resp, &sfp->rq_list, entry) {
+               /* look for requests that are ready + not SG_IO owned */
+               if (resp->done == 1 && !resp->sg_io_owned &&
+                   (-1 == pack_id || resp->header.pack_id == pack_id)) {
+                       resp->done = 2; /* guard against other readers */
+                       spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                       return resp;
+               }
+       }
+       spin_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+       return NULL;
+}
+
 static void
 sg_link_reserve(struct sg_fd *sfp, struct sg_request *srp, int size)
 {
-- 
2.23.0

Reply via email to