Removes the free and pending ipr_cmd queues. This is the first patch in a series
to make queuecommand lockless for the ipr driver. Creates a bitarray for each
hardware queue so we can track whether an ipr_cmd struct is free or not. Also
adds an atomic flag on each ipr_cmd to ensure we don't race between command
completion and error handling.

Signed-off-by: Brian King <[email protected]>
---

 drivers/scsi/ipr.c |  176 +++++++++++++++++++++--------------------------------
 drivers/scsi/ipr.h |   51 ++++++++++++++-
 2 files changed, 122 insertions(+), 105 deletions(-)

diff -puN drivers/scsi/ipr.h~ipr_cmd_bitarray drivers/scsi/ipr.h
--- linux-2.6.git/drivers/scsi/ipr.h~ipr_cmd_bitarray   2016-09-05 14:44:02.353628212 -0500
+++ linux-2.6.git-bjking1/drivers/scsi/ipr.h    2016-09-05 15:18:16.881117822 -0500
@@ -507,9 +507,8 @@ struct ipr_hrr_queue {
        volatile __be32 *hrrq_start;
        volatile __be32 *hrrq_end;
        volatile __be32 *hrrq_curr;
+       unsigned long *active_map;
 
-       struct list_head hrrq_free_q;
-       struct list_head hrrq_pending_q;
        struct list_head hrrq_error_q;
        spinlock_t _lock;
        spinlock_t *lock;
@@ -1621,6 +1620,7 @@ struct ipr_cmnd {
        struct list_head queue;
        struct scsi_cmnd *scsi_cmd;
        struct ata_queued_cmd *qc;
+       unsigned long atomic_flags;
        struct completion completion;
        struct timer_list timer;
        struct work_struct work;
@@ -1847,6 +1847,53 @@ ipr_err("-------------------------------
  * Inlines
  */
 
+enum ipr_atomic_flags {
+       IPR_CMD_COMPLETE = 0,
+};
+
+static inline int ipr_cmnd_complete(struct ipr_cmnd *ipr_cmd)
+{
+       /* xxx may be overkill since completions all occur under the HRRQ lock */
+       return test_and_set_bit(IPR_CMD_COMPLETE, &ipr_cmd->atomic_flags);
+}
+
+static inline void ipr_cmnd_clear_complete(struct ipr_cmnd *ipr_cmd)
+{
+       clear_bit(IPR_CMD_COMPLETE, &ipr_cmd->atomic_flags);
+}
+
+static inline void ipr_free_cmd(struct ipr_cmnd *ipr_cmd)
+{
+       clear_bit(ipr_cmd->cmd_index - ipr_cmd->hrrq->min_cmd_id,
+                 ipr_cmd->hrrq->active_map);
+}
+
+static inline struct ipr_cmnd* ipr_first_active_cmd(struct ipr_hrr_queue *hrrq)
+{
+       int i = find_first_bit(hrrq->active_map, hrrq->size);
+       struct ipr_cmnd *ipr_cmd = NULL;
+
+       if (i < hrrq->size)
+               ipr_cmd = hrrq->ioa_cfg->ipr_cmnd_list[i + hrrq->min_cmd_id];
+       return ipr_cmd;
+}
+
+static inline struct ipr_cmnd* ipr_next_active_cmd(struct ipr_hrr_queue *hrrq, struct ipr_cmnd *ipr_cmd)
+{
+       int last = ipr_cmd->cmd_index - hrrq->min_cmd_id;
+       int i;
+
+       i = find_next_bit(hrrq->active_map, hrrq->size, last + 1);
+       if (i < hrrq->size)
+               return hrrq->ioa_cfg->ipr_cmnd_list[i + hrrq->min_cmd_id];
+       return NULL;
+}
+
+#define for_each_active_cmd(ipr_cmd, hrrq) \
+               for (ipr_cmd = ipr_first_active_cmd(hrrq); \
+                       ipr_cmd; \
+                       ipr_cmd = ipr_next_active_cmd(hrrq, ipr_cmd))
+
 /**
  * ipr_is_ioa_resource - Determine if a resource is the IOA
  * @res:       resource entry struct
diff -puN drivers/scsi/ipr.c~ipr_cmd_bitarray drivers/scsi/ipr.c
--- linux-2.6.git/drivers/scsi/ipr.c~ipr_cmd_bitarray   2016-09-05 14:44:02.357628190 -0500
+++ linux-2.6.git-bjking1/drivers/scsi/ipr.c    2016-09-05 15:19:21.255759157 -0500
@@ -656,6 +656,7 @@ static void ipr_reinit_ipr_cmnd(struct i
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;
 
+       ipr_cmnd_clear_complete(ipr_cmd);
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
@@ -711,16 +712,17 @@ static void ipr_init_ipr_cmnd(struct ipr
 static
 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
 {
-       struct ipr_cmnd *ipr_cmd = NULL;
+       int index;
 
-       if (likely(!list_empty(&hrrq->hrrq_free_q))) {
-               ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
-                       struct ipr_cmnd, queue);
-               list_del(&ipr_cmd->queue);
-       }
+       do {
+               index = find_first_zero_bit(hrrq->active_map, hrrq->size);
 
+               if (index == hrrq->size)
+                       return NULL;
 
-       return ipr_cmd;
+       } while (test_and_set_bit(index, hrrq->active_map));
+
+       return hrrq->ioa_cfg->ipr_cmnd_list[index + hrrq->min_cmd_id];
 }
 
 /**
@@ -843,7 +845,7 @@ static void ipr_sata_eh_done(struct ipr_
        sata_port->ioasa.status |= ATA_BUSY;
 
        spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        ata_qc_complete(qc);
 }
@@ -869,7 +871,7 @@ static void __ipr_scsi_eh_done(struct ip
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        spin_lock(&ipr_cmd->hrrq->_lock);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        spin_unlock(&ipr_cmd->hrrq->_lock);
 }
 
@@ -897,9 +899,6 @@ static void ipr_scsi_eh_done(struct ipr_
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
-       spin_lock(&ipr_cmd->hrrq->_lock);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
-       spin_unlock(&ipr_cmd->hrrq->_lock);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 }
 
@@ -937,27 +936,30 @@ static void ipr_send_back_failed_ops(str
  **/
 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 {
-       struct ipr_cmnd *ipr_cmd, *temp;
+       struct ipr_cmnd *ipr_cmd;
        struct ipr_hrr_queue *hrrq;
 
        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
-               list_for_each_entry_safe(ipr_cmd,
-                                       temp, &hrrq->hrrq_pending_q, queue) {
-                       list_move_tail(&ipr_cmd->queue, &hrrq->hrrq_error_q);
-
-                       ipr_cmd->s.ioasa.hdr.ioasc =
-                               cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
-                       ipr_cmd->s.ioasa.hdr.ilid =
-                               cpu_to_be32(IPR_DRIVER_ILID);
-
-                       if (ipr_cmd->scsi_cmd)
-                               ipr_cmd->fast_done = ipr_scsi_eh_done;
-                       else if (ipr_cmd->qc)
-                               ipr_cmd->fast_done = ipr_sata_eh_done;
+               for_each_active_cmd(ipr_cmd, hrrq) {
+                       if (ipr_cmd == ioa_cfg->reset_cmd)
+                               continue;
 
-                       del_timer(&ipr_cmd->timer);
+                       if (!ipr_cmnd_complete(ipr_cmd)) {
+                               list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_error_q);
+                               ipr_cmd->s.ioasa.hdr.ioasc =
+                                       cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+                               ipr_cmd->s.ioasa.hdr.ilid =
+                                       cpu_to_be32(IPR_DRIVER_ILID);
+
+                               if (ipr_cmd->scsi_cmd)
+                                       ipr_cmd->fast_done = ipr_scsi_eh_done;
+                               else if (ipr_cmd->qc)
+                                       ipr_cmd->fast_done = ipr_sata_eh_done;
+
+                               del_timer(&ipr_cmd->timer);
+                       }
                }
                spin_unlock(&hrrq->_lock);
        }
@@ -1010,8 +1012,6 @@ static void ipr_do_req(struct ipr_cmnd *
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
 {
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
-
        ipr_cmd->done = done;
 
        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -1145,7 +1145,6 @@ static void ipr_send_hcam(struct ipr_ioa
 
        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
 
                ipr_cmd->u.hostrcb = hostrcb;
@@ -1534,7 +1533,7 @@ static void ipr_process_ccn(struct ipr_c
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
        list_del_init(&hostrcb->queue);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
 
        if (ioasc) {
                if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
@@ -2654,7 +2653,7 @@ static void ipr_process_error(struct ipr
                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
        list_del_init(&hostrcb->queue);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
 
        if (!ioasc) {
                ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -5075,7 +5074,7 @@ static int ipr_wait_for_ops(struct ipr_i
 
                for_each_hrrq(hrrq, ioa_cfg) {
                        spin_lock_irqsave(hrrq->lock, flags);
-                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                       for_each_active_cmd(ipr_cmd, hrrq) {
                                if (match(ipr_cmd, device)) {
                                        ipr_cmd->eh_comp = &comp;
                                        wait++;
@@ -5092,7 +5091,7 @@ static int ipr_wait_for_ops(struct ipr_i
 
                                for_each_hrrq(hrrq, ioa_cfg) {
                                        spin_lock_irqsave(hrrq->lock, flags);
-                                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                                       for_each_active_cmd(ipr_cmd, hrrq) {
                                                if (match(ipr_cmd, device)) {
                                                        ipr_cmd->eh_comp = NULL;
                                                        wait++;
@@ -5193,7 +5192,7 @@ static int ipr_device_reset(struct ipr_i
 
        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
                if (ipr_cmd->ioa_cfg->sis64)
                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -5294,7 +5293,7 @@ static int __ipr_eh_dev_reset(struct scs
 
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
-               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+               for_each_active_cmd(ipr_cmd, hrrq) {
                        if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
                                if (ipr_cmd->scsi_cmd)
                                        ipr_cmd->done = ipr_scsi_eh_done;
@@ -5362,7 +5361,7 @@ static void ipr_bus_reset_done(struct ip
        else
                ipr_cmd->sibling->done(ipr_cmd->sibling);
 
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        LEAVE;
 }
 
@@ -5451,7 +5450,7 @@ static int ipr_cancel_op(struct scsi_cmn
 
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
-               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+               for_each_active_cmd(ipr_cmd, hrrq) {
                        if (ipr_cmd->scsi_cmd == scsi_cmd) {
                                ipr_cmd->done = ipr_scsi_eh_done;
                                op_found = 1;
@@ -5485,7 +5484,7 @@ static int ipr_cancel_op(struct scsi_cmn
                ipr_trace;
        }
 
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        if (!ipr_is_naca_model(res))
                res->needs_sync_complete = 1;
 
@@ -5634,7 +5633,6 @@ static irqreturn_t ipr_handle_other_inte
                                /* clear stage change */
                                writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
-                               list_del(&ioa_cfg->reset_cmd->queue);
                                del_timer(&ioa_cfg->reset_cmd->timer);
                                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
                                return IRQ_HANDLED;
@@ -5649,7 +5647,6 @@ static irqreturn_t ipr_handle_other_inte
                writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 
-               list_del(&ioa_cfg->reset_cmd->queue);
                del_timer(&ioa_cfg->reset_cmd->timer);
                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
        } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
@@ -5731,9 +5728,11 @@ static int ipr_process_hrrq(struct ipr_h
                ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
                ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
-               ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
-               list_move_tail(&ipr_cmd->queue, doneq);
+               if (!ipr_cmnd_complete(ipr_cmd)) {
+                       ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+                       list_add_tail(&ipr_cmd->queue, doneq);
+                       num_hrrq++;
+               }
 
                if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
                        hrr_queue->hrrq_curr++;
@@ -5741,7 +5740,7 @@ static int ipr_process_hrrq(struct ipr_h
                        hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
                        hrr_queue->toggle_bit ^= 1u;
                }
-               num_hrrq++;
+
                if (budget > 0 && num_hrrq >= budget)
                        break;
        }
@@ -6039,7 +6038,7 @@ static void ipr_erp_done(struct ipr_cmnd
                res->in_erp = 0;
        }
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -6056,6 +6055,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
+       ipr_cmnd_clear_complete(ipr_cmd);
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
@@ -6430,7 +6430,7 @@ static void ipr_erp_start(struct ipr_ioa
        }
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -6455,9 +6455,8 @@ static void ipr_scsi_done(struct ipr_cmn
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
-
                spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
-               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+               ipr_free_cmd(ipr_cmd);
                scsi_cmd->scsi_done(scsi_cmd);
                spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        } else {
@@ -6514,32 +6513,9 @@ static int ipr_queuecommand(struct Scsi_
 
        hrrq = &ioa_cfg->hrrq[hrrq_id];
 
-       spin_lock_irqsave(hrrq->lock, hrrq_flags);
-       /*
-        * We are currently blocking all devices due to a host reset
-        * We have told the host to stop giving us new requests, but
-        * ERP ops don't count. FIXME
-        */
-       if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
-               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-       /*
-        * FIXME - Create scsi_set_host_offline interface
-        *  and the ioa_is_dead check can be removed
-        */
-       if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
-               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
-               goto err_nodev;
-       }
-
        ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
-       if (ipr_cmd == NULL) {
-               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+       if (ipr_cmd == NULL)
                return SCSI_MLQUEUE_HOST_BUSY;
-       }
-       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
        ioarcb = &ipr_cmd->ioarcb;
@@ -6585,16 +6561,16 @@ static int ipr_queuecommand(struct Scsi_
                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
        spin_lock_irqsave(hrrq->lock, hrrq_flags);
-       if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
-               list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+       if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa))) {
+               ipr_free_cmd(ipr_cmd);
                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                if (!rc)
                        scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
-       if (unlikely(hrrq->ioa_is_dead)) {
-               list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+       if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa)) {
+               ipr_free_cmd(ipr_cmd);
                spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                scsi_dma_unmap(scsi_cmd);
                goto err_nodev;
@@ -6605,7 +6581,6 @@ static int ipr_queuecommand(struct Scsi_
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
                res->needs_sync_complete = 0;
        }
-       list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
@@ -6721,7 +6696,7 @@ static void ipr_ata_post_internal(struct
 
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
-               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+               for_each_active_cmd(ipr_cmd, hrrq) {
                        if (ipr_cmd->qc == qc)
                                found++;
                }
@@ -6794,7 +6769,7 @@ static void ipr_sata_done(struct ipr_cmn
                qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
        else
                qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        spin_unlock_irqrestore(ipr_cmd->hrrq->lock, flags);
        ata_qc_complete(qc);
 }
@@ -6960,7 +6935,7 @@ static unsigned int ipr_qc_issue(struct
        spin_lock(&ipr_cmd->hrrq->_lock);
        if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
                        ipr_cmd->hrrq->ioa_is_dead)) {
-               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+               ipr_free_cmd(ipr_cmd);
                spin_unlock(&ipr_cmd->hrrq->_lock);
                return AC_ERR_SYSTEM;
        }
@@ -6977,7 +6952,6 @@ static unsigned int ipr_qc_issue(struct
        memset(regs, 0, sizeof(*regs));
        ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
 
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->qc = qc;
        ipr_cmd->done = ipr_sata_done;
        ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -7157,7 +7131,7 @@ static int ipr_ioa_bringdown_done(struct
        ipr_send_back_failed_ops(ioa_cfg);
        spin_lock_irq(ioa_cfg->host->host_lock);
 
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        wake_up_all(&ioa_cfg->reset_wait_q);
        LEAVE;
 
@@ -7216,7 +7190,7 @@ static int ipr_ioa_reset_done(struct ipr
        dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
 
        ioa_cfg->reset_retries = 0;
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        wake_up_all(&ioa_cfg->reset_wait_q);
 
        spin_unlock_irq(ioa_cfg->host->host_lock);
@@ -7558,7 +7532,7 @@ static int ipr_reset_cmd_failed(struct i
                ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
 
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
        return IPR_RC_JOB_RETURN;
 }
 
@@ -8065,8 +8039,7 @@ static int ipr_ioafp_page0_inquiry(struc
                if (!ipr_testmode) {
                        ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-                       list_add_tail(&ipr_cmd->queue,
-                                       &ioa_cfg->hrrq->hrrq_free_q);
+                       ipr_free_cmd(ipr_cmd);
                        return IPR_RC_JOB_RETURN;
                }
        }
@@ -8208,8 +8181,8 @@ static void ipr_reset_timer_done(struct
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
        if (ioa_cfg->reset_cmd == ipr_cmd) {
-               list_del(&ipr_cmd->queue);
-               ipr_cmd->done(ipr_cmd);
+               if (!ipr_cmnd_complete(ipr_cmd))
+                       ipr_cmd->done(ipr_cmd);
        }
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -8234,7 +8207,6 @@ static void ipr_reset_start_timer(struct
 {
 
        ENTER;
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->done = ipr_reset_ioa_job;
 
        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -8329,8 +8301,6 @@ static int ipr_reset_next_stage(struct i
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
 
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
-
        return IPR_RC_JOB_RETURN;
 }
 
@@ -8400,7 +8370,6 @@ static int ipr_reset_enable_ioa(struct i
        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
        LEAVE;
        return IPR_RC_JOB_RETURN;
@@ -8931,10 +8900,10 @@ static int ipr_reset_cancel_hcam_done(st
 
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
-               list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
+               for_each_active_cmd(loop_cmd, hrrq) {
                        count++;
                        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-                       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+                       ipr_free_cmd(ipr_cmd);
                        rc = IPR_RC_JOB_RETURN;
                        break;
                }
@@ -8970,7 +8939,7 @@ static int ipr_reset_cancel_hcam(struct
 
        if (!hrrq->ioa_is_dead) {
                if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
-                       list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
+                       for_each_active_cmd(hcam_cmd, hrrq) {
                                if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
                                        continue;
 
@@ -9137,8 +9106,7 @@ static void ipr_reset_ioa_job(struct ipr
                         * We are doing nested adapter resets and this is
                         * not the current reset job.
                         */
-                       list_add_tail(&ipr_cmd->queue,
-                                       &ipr_cmd->hrrq->hrrq_free_q);
+                       ipr_free_cmd(ipr_cmd);
                        return;
                }
 
@@ -9275,7 +9243,6 @@ static int ipr_reset_freeze(struct ipr_c
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->done = ipr_reset_ioa_job;
        return IPR_RC_JOB_RETURN;
 }
@@ -9456,6 +9423,9 @@ static void ipr_free_cmd_blks(struct ipr
                }
        }
 
+       for (i = 0; i < ioa_cfg->hrrq_num; i++)
+               kfree(ioa_cfg->hrrq[i].active_map);
+
        if (ioa_cfg->ipr_cmd_pool)
                dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
 
@@ -9613,6 +9583,8 @@ static int ipr_alloc_cmd_blks(struct ipr
                        ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
                }
                ioa_cfg->hrrq[i].size = entries_each_hrrq;
+               ioa_cfg->hrrq[i].active_map = kcalloc(BITS_TO_LONGS(entries_each_hrrq),
+                                                     sizeof(unsigned long), GFP_KERNEL);
        }
 
        BUG_ON(ioa_cfg->hrrq_num == 0);
@@ -9664,7 +9636,6 @@ static int ipr_alloc_cmd_blks(struct ipr
 
                ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
                ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
-               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
                if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
                        hrrq_id++;
        }
@@ -9911,8 +9882,6 @@ static void ipr_init_ioa_cfg(struct ipr_
        pci_set_drvdata(pdev, ioa_cfg);
 
        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
-               INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
-               INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_error_q);
                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
                if (i == 0)
@@ -10820,9 +10789,10 @@ static struct pci_driver ipr_driver = {
  **/
 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
 {
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       ipr_free_cmd(ipr_cmd);
 }
 
+
 /**
  * ipr_halt - Issue shutdown prepare to all adapters
  *
_


------------------------------------------------------------------------------
_______________________________________________
Iprdd-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/iprdd-devel

Reply via email to