This patch removes all spinlock acquisitions from ipr's queuecommand path, replacing them with per-HRRQ atomic reference counts (hrrq->active / ioa_cfg->hrrq_active) so that command submission and completion can proceed lock-free.

Signed-off-by: Brian King <brk...@linux.vnet.ibm.com>
---

 drivers/scsi/ipr.c |   70 +++++++++++++++++++++++++++++++----------------------
 drivers/scsi/ipr.h |   52 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 91 insertions(+), 31 deletions(-)

diff -puN drivers/scsi/ipr.h~ipr_lockless_atomic drivers/scsi/ipr.h
--- linux-2.6.git/drivers/scsi/ipr.h~ipr_lockless_atomic        2016-09-06 21:01:16.260239296 -0500
+++ linux-2.6.git-bjking1/drivers/scsi/ipr.h    2016-09-06 21:07:15.309878131 -0500
@@ -512,6 +512,7 @@ struct ipr_hrr_queue {
        struct list_head hrrq_error_q;
        spinlock_t _lock;
        spinlock_t *lock;
+       atomic_t active;
 
        volatile u32 toggle_bit;
        u32 size;
@@ -521,6 +522,7 @@ struct ipr_hrr_queue {
        u8 ioa_is_dead:1;
        u8 allow_cmds:1;
        u8 removing_ioa:1;
+       u8 is_active:1;
 
        struct irq_poll iopoll;
 };
@@ -1293,13 +1295,13 @@ struct ipr_sata_port {
 };
 
 struct ipr_resource_entry {
-       u8 needs_sync_complete:1;
+       u8 needs_sync_complete:1; /* atomic? gscsi and vset */
        u8 in_erp:1;
        u8 add_to_ml:1;
        u8 del_from_ml:1;
        u8 resetting_device:1;
-       u8 reset_occurred:1;
-       u8 raw_mode:1;
+       u8 reset_occurred:1; /* atomic? gscsi only */
+       u8 raw_mode:1; /* atomic? AF DASD only */
 
        u32 bus;                /* AKA channel */
        u32 target;             /* AKA id */
@@ -1492,6 +1494,7 @@ struct ipr_ioa_cfg {
        u8 cfg_locked:1;
        u8 clear_isr:1;
        u8 probe_done:1;
+       u8 allow_cmds:1;
 
        u8 revid;
 
@@ -1544,6 +1547,7 @@ struct ipr_ioa_cfg {
        struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
        u32 hrrq_num;
        atomic_t  hrrq_index;
+       atomic_t  hrrq_active;
        u16 identify_hrrq_index;
 
        struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
@@ -1868,6 +1872,48 @@ static inline void ipr_free_cmd(struct i
                  ipr_cmd->hrrq->active_map);
 }
 
+/*
+ * ipr_hrrq_activate - mark an HRRQ as able to take lock-free references.
+ *
+ * Takes the queue's baseline reference (hrrq->active) and counts the queue
+ * in the adapter-wide total (hrrq_active), exactly once per activation;
+ * is_active guards against double-counting on repeated calls.
+ *
+ * NOTE(review): is_active is a plain bitfield, not atomic.  Every caller in
+ * this patch holds hrrq->_lock, which presumably serializes it -- confirm no
+ * lock-free caller is ever added.
+ */
+static inline void ipr_hrrq_activate(struct ipr_hrr_queue *hrrq)
+{
+       if (!hrrq->is_active) {
+               atomic_inc(&hrrq->active);
+               atomic_inc(&hrrq->ioa_cfg->hrrq_active);
+               hrrq->is_active = 1;
+       }
+}
+
+/*
+ * ipr_hrrq_deactivate - drop the queue's baseline reference.
+ *
+ * After this, ipr_hrrq_enter() fails (atomic_inc_not_zero sees 0) once all
+ * in-flight references drain.  The adapter-wide count is only dropped here
+ * when no commands are in flight; otherwise the last ipr_hrrq_exit() drops it.
+ *
+ * NOTE(review): unlike ipr_hrrq_exit(), hrrq_active reaching zero here does
+ * NOT kick the reset state machine -- this appears to rely on the caller
+ * (the reset path) proceeding synchronously.  Verify that assumption holds
+ * for every caller, or the reset could stall.
+ */
+static inline void ipr_hrrq_deactivate(struct ipr_hrr_queue *hrrq)
+{
+       if (hrrq->is_active) {
+               hrrq->is_active = 0;
+
+               if (!atomic_dec_return(&hrrq->active))
+                       atomic_dec(&hrrq->ioa_cfg->hrrq_active);
+       }
+}
+
+/*
+ * ipr_hrrq_enter - take a lock-free reference on an HRRQ.
+ *
+ * Returns nonzero on success.  Returns 0 (caller must not submit) once the
+ * queue has been deactivated and its baseline reference removed; pair every
+ * successful call with ipr_hrrq_exit().
+ */
+static inline int ipr_hrrq_enter(struct ipr_hrr_queue *hrrq)
+{
+       return atomic_inc_not_zero(&hrrq->active);
+}
+
+/*
+ * NOTE(review): a static function declared in a header but defined only in
+ * ipr.c triggers "declared static but never defined" warnings in any other
+ * translation unit that includes ipr.h -- OK only if ipr.h is private to
+ * ipr.c; confirm.
+ */
+static void ipr_reset_ioa_job(struct ipr_cmnd *);
+
+/*
+ * ipr_hrrq_exit - drop a reference taken by ipr_hrrq_enter().
+ *
+ * hrrq->active can only reach zero after ipr_hrrq_deactivate() has removed
+ * the baseline reference, i.e. during an adapter reset.  When the last
+ * reference on the last active queue drains, restart the reset state
+ * machine (which bailed out early while hrrq_active was nonzero) under
+ * host_lock.
+ *
+ * NOTE(review): ioa_cfg->reset_cmd is dereferenced without a NULL check.
+ * This presumes hrrq_active can only hit zero while a reset job is pending
+ * and reset_cmd is set -- confirm there is no window where deactivation
+ * races with reset_cmd being cleared.
+ */
+static inline void ipr_hrrq_exit(struct ipr_hrr_queue *hrrq)
+{
+       unsigned long flags;
+       struct ipr_ioa_cfg *ioa_cfg;
+
+       if (!atomic_dec_return(&hrrq->active)) {
+               ioa_cfg = hrrq->ioa_cfg;
+
+               if (!atomic_dec_return(&ioa_cfg->hrrq_active)) {
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+                       ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+               }
+       }
+}
+
 static inline struct ipr_cmnd* ipr_first_active_cmd(struct ipr_hrr_queue *hrrq)
 {
        int i = find_first_bit(hrrq->active_map, hrrq->size);
diff -puN drivers/scsi/ipr.c~ipr_lockless_atomic drivers/scsi/ipr.c
--- linux-2.6.git/drivers/scsi/ipr.c~ipr_lockless_atomic        2016-09-06 21:01:16.264239271 -0500
+++ linux-2.6.git-bjking1/drivers/scsi/ipr.c    2016-09-06 21:07:15.311878103 -0500
@@ -6455,10 +6455,8 @@ static void ipr_scsi_done(struct ipr_cmn
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
                ipr_free_cmd(ipr_cmd);
                scsi_cmd->scsi_done(scsi_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        } else {
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                spin_lock(&ipr_cmd->hrrq->_lock);
@@ -6513,9 +6511,27 @@ static int ipr_queuecommand(struct Scsi_
 
        hrrq = &ioa_cfg->hrrq[hrrq_id];
 
+       if (!ipr_hrrq_enter(hrrq)) {
+               dev_info(&ioa_cfg->pdev->dev, "Failed to get a ref\n");
+
+               spin_lock_irqsave(hrrq->lock, hrrq_flags);
+               if (hrrq->ioa_is_dead || hrrq->removing_ioa) {
+                       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+                       memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+                       scsi_cmd->result = (DID_NO_CONNECT << 16);
+                       scsi_cmd->scsi_done(scsi_cmd);
+                       return 0;
+               }
+
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
-       if (ipr_cmd == NULL)
+       if (ipr_cmd == NULL) {
+               ipr_hrrq_exit(hrrq);
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
 
        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
        ioarcb = &ipr_cmd->ioarcb;
@@ -6529,7 +6545,10 @@ static int ipr_queuecommand(struct Scsi_
                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
                if (res->reset_occurred) {
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                        res->reset_occurred = 0;
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+                       ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
                }
        }
@@ -6542,6 +6561,13 @@ static int ipr_queuecommand(struct Scsi_
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
                else
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
+
+               if (res->needs_sync_complete) {
+                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                       res->needs_sync_complete = 0;
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               }
        }
 
        if (scsi_cmd->cmnd[0] >= 0xC0 &&
@@ -6560,38 +6586,20 @@ static int ipr_queuecommand(struct Scsi_
        else
                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-       spin_lock_irqsave(hrrq->lock, hrrq_flags);
-       if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa))) {
-               ipr_free_cmd(ipr_cmd);
-               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
-               if (!rc)
+       if (rc) {
+               if (!ipr_cmnd_complete(ipr_cmd)) {
+                       ipr_free_cmd(ipr_cmd);
                        scsi_dma_unmap(scsi_cmd);
+               }
+               ipr_hrrq_exit(hrrq);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
-       if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa)) {
-               ipr_free_cmd(ipr_cmd);
-               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
-               scsi_dma_unmap(scsi_cmd);
-               goto err_nodev;
-       }
-
        ioarcb->res_handle = res->res_handle;
-       if (res->needs_sync_complete) {
-               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-               res->needs_sync_complete = 0;
-       }
+
        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
-       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
-       return 0;
-
-err_nodev:
-       spin_lock_irqsave(hrrq->lock, hrrq_flags);
-       memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-       scsi_cmd->result = (DID_NO_CONNECT << 16);
-       scsi_cmd->scsi_done(scsi_cmd);
-       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+       ipr_hrrq_exit(hrrq);
        return 0;
 }
 
@@ -7160,6 +7168,7 @@ static int ipr_ioa_reset_done(struct ipr
        for (j = 0; j < ioa_cfg->hrrq_num; j++) {
                spin_lock(&ioa_cfg->hrrq[j]._lock);
                ioa_cfg->hrrq[j].allow_cmds = 1;
+               ipr_hrrq_activate(&ioa_cfg->hrrq[j]);
                spin_unlock(&ioa_cfg->hrrq[j]._lock);
        }
        wmb();
@@ -9110,6 +9119,9 @@ static void ipr_reset_ioa_job(struct ipr
                        return;
                }
 
+               if (atomic_read(&ioa_cfg->hrrq_active))
+                       return;
+
                if (IPR_IOASC_SENSE_KEY(ioasc)) {
                        rc = ipr_cmd->job_step_failed(ipr_cmd);
                        if (rc == IPR_RC_JOB_RETURN)
@@ -9147,6 +9159,7 @@ static void _ipr_initiate_ioa_reset(stru
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_cmds = 0;
+               ipr_hrrq_deactivate(&ioa_cfg->hrrq[i]);
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();
@@ -9333,6 +9346,7 @@ static void ipr_pci_perm_failure(struct
                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                        spin_lock(&ioa_cfg->hrrq[i]._lock);
                        ioa_cfg->hrrq[i].allow_cmds = 0;
+                       ipr_hrrq_deactivate(&ioa_cfg->hrrq[i]);
                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
                }
                wmb();
_


------------------------------------------------------------------------------
_______________________________________________
Iprdd-devel mailing list
Iprdd-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/iprdd-devel

Reply via email to