Brian King <[email protected]> writes:
> This converts ipr's queuecommand path to use percpu refcounts
> rather than atomics.
>
> Signed-off-by: Brian King <[email protected]>
> ---
>
> drivers/scsi/ipr.c | 111 ++++++++++++++++++++++++++++++++++++++++++-----------
> drivers/scsi/ipr.h | 53 +++----------------------
> 2 files changed, 98 insertions(+), 66 deletions(-)
>
> diff -puN drivers/scsi/ipr.h~ipr_lockless_percpu drivers/scsi/ipr.h
> --- linux-2.6.git/drivers/scsi/ipr.h~ipr_lockless_percpu	2016-09-06 21:08:18.268234535 -0500
> +++ linux-2.6.git-bjking1/drivers/scsi/ipr.h	2016-09-06 21:08:18.278234468 -0500
> @@ -33,6 +33,7 @@
> #include <linux/list.h>
> #include <linux/kref.h>
> #include <linux/irq_poll.h>
> +#include <linux/percpu-refcount.h>
> #include <scsi/scsi.h>
> #include <scsi/scsi_cmnd.h>
>
> @@ -512,7 +513,6 @@ struct ipr_hrr_queue {
> struct list_head hrrq_error_q;
> spinlock_t _lock;
> spinlock_t *lock;
> - atomic_t active;
>
> volatile u32 toggle_bit;
> u32 size;
> @@ -522,7 +522,6 @@ struct ipr_hrr_queue {
> u8 ioa_is_dead:1;
> u8 allow_cmds:1;
> u8 removing_ioa:1;
> - u8 is_active:1;
>
> struct irq_poll iopoll;
> };
> @@ -1472,6 +1471,12 @@ enum ipr_sdt_state {
> DUMP_OBTAINED
> };
>
> +struct ipr_ioa_ref_cnt {
> + struct percpu_ref ref;
> + int released;
> + struct ipr_ioa_cfg *ioa_cfg;
> +};
Do we really need the pointer back to ioa_cfg? And the released variable
could be inferred if we keep track of the first reference. If we handle
these two points, we don't need the ipr_ioa_ref_cnt structure at all and
can encapsulate the percpu_ref counter directly in the main structure.
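Something along these lines is what I have in mind (untested sketch; the
ioa_usage field name is made up):

	struct ipr_ioa_cfg {
		/* ... existing fields ... */
		struct percpu_ref ioa_usage;	/* instead of ioa_usage_counter */
	};

	static void ipr_ioa_usage_release(struct percpu_ref *ref)
	{
		/* container_of() gets us back to the adapter, so no
		 * back-pointer or wrapper structure is needed. */
		struct ipr_ioa_cfg *ioa_cfg =
			container_of(ref, struct ipr_ioa_cfg, ioa_usage);
		unsigned long lock_flags;

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->reset_cmd)
			ipr_reset_ioa_job(ioa_cfg->reset_cmd);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}

and in ipr_probe_ioa():

	rc = percpu_ref_init(&ioa_cfg->ioa_usage, ipr_ioa_usage_release,
			     0, GFP_KERNEL);

That would drop the kzalloc()/kfree() of the wrapper as well.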
> +
> /* Per-controller data */
> struct ipr_ioa_cfg {
> char eye_catcher[8];
> @@ -1547,7 +1552,6 @@ struct ipr_ioa_cfg {
> struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
> u32 hrrq_num;
> atomic_t hrrq_index;
> - atomic_t hrrq_active;
> u16 identify_hrrq_index;
>
> struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
> @@ -1592,6 +1596,7 @@ struct ipr_ioa_cfg {
> int (*reset) (struct ipr_cmnd *);
>
> struct ata_host ata_host;
> + struct ipr_ioa_ref_cnt *ioa_usage_counter;
> char ipr_cmd_label[8];
> #define IPR_CMD_LABEL "ipr_cmd"
> u32 max_cmds;
> @@ -1872,48 +1877,6 @@ static inline void ipr_free_cmd(struct i
> ipr_cmd->hrrq->active_map);
> }
>
> -static inline void ipr_hrrq_activate(struct ipr_hrr_queue *hrrq)
> -{
> - if (!hrrq->is_active) {
> - atomic_inc(&hrrq->active);
> - atomic_inc(&hrrq->ioa_cfg->hrrq_active);
> - hrrq->is_active = 1;
> - }
> -}
> -
> -static inline void ipr_hrrq_deactivate(struct ipr_hrr_queue *hrrq)
> -{
> - if (hrrq->is_active) {
> - hrrq->is_active = 0;
> -
> - if (!atomic_dec_return(&hrrq->active))
> - atomic_dec(&hrrq->ioa_cfg->hrrq_active);
> - }
> -}
> -
> -static inline int ipr_hrrq_enter(struct ipr_hrr_queue *hrrq)
> -{
> - return atomic_inc_not_zero(&hrrq->active);
> -}
> -
> -static void ipr_reset_ioa_job(struct ipr_cmnd *);
> -
> -static inline void ipr_hrrq_exit(struct ipr_hrr_queue *hrrq)
> -{
> - unsigned long flags;
> - struct ipr_ioa_cfg *ioa_cfg;
> -
> - if (!atomic_dec_return(&hrrq->active)) {
> - ioa_cfg = hrrq->ioa_cfg;
> -
> - if (!atomic_dec_return(&ioa_cfg->hrrq_active)) {
> - spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
> - ipr_reset_ioa_job(ioa_cfg->reset_cmd);
> - spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
> - }
> - }
> -}
> -
> static inline struct ipr_cmnd* ipr_first_active_cmd(struct ipr_hrr_queue *hrrq)
> {
> int i = find_first_bit(hrrq->active_map, hrrq->size);
> diff -puN drivers/scsi/ipr.c~ipr_lockless_percpu drivers/scsi/ipr.c
> --- linux-2.6.git/drivers/scsi/ipr.c~ipr_lockless_percpu	2016-09-06 21:08:18.272234509 -0500
> +++ linux-2.6.git-bjking1/drivers/scsi/ipr.c	2016-09-06 21:08:18.280234455 -0500
> @@ -6511,7 +6511,7 @@ static int ipr_queuecommand(struct Scsi_
>
> hrrq = &ioa_cfg->hrrq[hrrq_id];
>
> - if (!ipr_hrrq_enter(hrrq)) {
> + if (!percpu_ref_tryget_live(&ioa_cfg->ioa_usage_counter->ref)) {
I'd like to see these calls encapsulated inside ipr_ioa_enter()/ipr_ioa_exit()
helpers, or something like that. Just cosmetic, though.
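For instance (untested sketch, just wrapping what the patch already does):

	static inline int ipr_ioa_enter(struct ipr_ioa_cfg *ioa_cfg)
	{
		/* Fails once the ref has been killed for reset/removal. */
		return percpu_ref_tryget_live(&ioa_cfg->ioa_usage_counter->ref);
	}

	static inline void ipr_ioa_exit(struct ipr_ioa_cfg *ioa_cfg)
	{
		percpu_ref_put(&ioa_cfg->ioa_usage_counter->ref);
	}

Then queuecommand becomes "if (!ipr_ioa_enter(ioa_cfg))" with matching
ipr_ioa_exit() calls on the error and completion paths.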
> dev_info(&ioa_cfg->pdev->dev, "Failed to get a ref\n");
>
> spin_lock_irqsave(hrrq->lock, hrrq_flags);
> @@ -6529,7 +6529,7 @@ static int ipr_queuecommand(struct Scsi_
>
> ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
> if (ipr_cmd == NULL) {
> - ipr_hrrq_exit(hrrq);
> + percpu_ref_put(&ioa_cfg->ioa_usage_counter->ref);
> return SCSI_MLQUEUE_HOST_BUSY;
> }
>
> @@ -6591,7 +6591,7 @@ static int ipr_queuecommand(struct Scsi_
> ipr_free_cmd(ipr_cmd);
> scsi_dma_unmap(scsi_cmd);
> }
> - ipr_hrrq_exit(hrrq);
> + percpu_ref_put(&ioa_cfg->ioa_usage_counter->ref);
> return SCSI_MLQUEUE_HOST_BUSY;
> }
>
> @@ -6599,7 +6599,7 @@ static int ipr_queuecommand(struct Scsi_
>
> ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
> ipr_send_command(ipr_cmd);
> - ipr_hrrq_exit(hrrq);
> + percpu_ref_put(&ioa_cfg->ioa_usage_counter->ref);
> return 0;
> }
>
> @@ -7161,14 +7161,17 @@ static int ipr_ioa_reset_done(struct ipr
> {
> struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
> struct ipr_resource_entry *res;
> + struct ipr_hrr_queue *hrrq;
> int j;
>
> ENTER;
> ioa_cfg->in_reset_reload = 0;
> + hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
> + if (!hrrq->ioa_is_dead && !hrrq->removing_ioa)
> + ioa_cfg->allow_cmds = 1;
> for (j = 0; j < ioa_cfg->hrrq_num; j++) {
> spin_lock(&ioa_cfg->hrrq[j]._lock);
> ioa_cfg->hrrq[j].allow_cmds = 1;
> - ipr_hrrq_activate(&ioa_cfg->hrrq[j]);
> spin_unlock(&ioa_cfg->hrrq[j]._lock);
> }
> wmb();
> @@ -7203,6 +7206,8 @@ static int ipr_ioa_reset_done(struct ipr
> wake_up_all(&ioa_cfg->reset_wait_q);
>
> spin_unlock_irq(ioa_cfg->host->host_lock);
> + if (ioa_cfg->allow_cmds)
> + percpu_ref_reinit(&ioa_cfg->ioa_usage_counter->ref);
> scsi_unblock_requests(ioa_cfg->host);
> ipr_send_back_failed_ops(ioa_cfg);
> spin_lock_irq(ioa_cfg->host->host_lock);
> @@ -9119,9 +9124,6 @@ static void ipr_reset_ioa_job(struct ipr
> return;
> }
>
> - if (atomic_read(&ioa_cfg->hrrq_active))
> - return;
> -
> if (IPR_IOASC_SENSE_KEY(ioasc)) {
> rc = ipr_cmd->job_step_failed(ipr_cmd);
> if (rc == IPR_RC_JOB_RETURN)
> @@ -9134,6 +9136,48 @@ static void ipr_reset_ioa_job(struct ipr
> } while (rc == IPR_RC_JOB_CONTINUE);
> }
>
> +static void ipr_ioa_usage_counter_release(struct percpu_ref *ref)
> +{
> + struct ipr_ioa_ref_cnt *ioa_ref = (struct ipr_ioa_ref_cnt *)ref;
> + struct ipr_ioa_cfg *ioa_cfg = ioa_ref->ioa_cfg;
> + unsigned long lock_flags;
> +
> + ENTER;
> + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
> + if (ioa_cfg->reset_cmd)
> + ipr_reset_ioa_job(ioa_cfg->reset_cmd);
> + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
> + LEAVE;
> +}
> +
> +/**
> + * ipr_reset_quiesce_work - Quiesce queuecommand
> + * @work: work struct
> + *
> + * Description: Quiesce queuecommand.
> + *
> + **/
> +static void ipr_reset_quiesce_work(struct work_struct *work)
> +{
> + struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
> + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
> + unsigned long lock_flags = 0;
> + int allow_cmds;
> + ENTER;
> +
> + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
> + allow_cmds = ioa_cfg->allow_cmds;
> + ioa_cfg->allow_cmds = 0;
> + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
> +
> + /* xxx add a timeout here? */
Wouldn't a timeout indicate a bug in the code? I don't think we need an
explicit timeout, since the reference count should always converge to zero
unless there's a bug.
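To illustrate why (sketch of the flow as I read it, not new code):

	/* Stop new entries: percpu_ref_tryget_live() fails from here on. */
	percpu_ref_kill(&ioa_cfg->ioa_usage_counter->ref);

	/*
	 * Only the commands already in flight still hold references, and
	 * each of them does exactly one percpu_ref_put() on its exit path.
	 * When the last one drops, the ->release callback
	 * (ipr_ioa_usage_counter_release) fires and kicks the reset job,
	 * so this always converges without polling or a timeout.
	 */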
> + if (allow_cmds)
> + percpu_ref_kill(&ioa_cfg->ioa_usage_counter->ref);
> + else
> + ipr_ioa_usage_counter_release(&ioa_cfg->ioa_usage_counter->ref);
> + LEAVE;
> +}
> +
> /**
> * _ipr_initiate_ioa_reset - Initiate an adapter reset
> * @ioa_cfg: ioa config struct
> @@ -9159,7 +9203,6 @@ static void _ipr_initiate_ioa_reset(stru
> for (i = 0; i < ioa_cfg->hrrq_num; i++) {
> spin_lock(&ioa_cfg->hrrq[i]._lock);
> ioa_cfg->hrrq[i].allow_cmds = 0;
> - ipr_hrrq_deactivate(&ioa_cfg->hrrq[i]);
> spin_unlock(&ioa_cfg->hrrq[i]._lock);
> }
> wmb();
> @@ -9171,7 +9214,8 @@ static void _ipr_initiate_ioa_reset(stru
> ipr_cmd->job_step = job_step;
> ipr_cmd->u.shutdown_type = shutdown_type;
>
> - ipr_reset_ioa_job(ipr_cmd);
> + INIT_WORK(&ipr_cmd->work, ipr_reset_quiesce_work);
> + queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
> }
>
> /**
> @@ -9346,7 +9390,6 @@ static void ipr_pci_perm_failure(struct
> for (i = 0; i < ioa_cfg->hrrq_num; i++) {
> spin_lock(&ioa_cfg->hrrq[i]._lock);
> ioa_cfg->hrrq[i].allow_cmds = 0;
> - ipr_hrrq_deactivate(&ioa_cfg->hrrq[i]);
> spin_unlock(&ioa_cfg->hrrq[i]._lock);
> }
> wmb();
> @@ -9403,6 +9446,7 @@ static int ipr_probe_ioa_part2(struct ip
> spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
> dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
> ioa_cfg->probe_done = 1;
> + ioa_cfg->allow_cmds = 1;
> if (ioa_cfg->needs_hard_reset) {
> ioa_cfg->needs_hard_reset = 0;
> ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
> @@ -9540,6 +9584,8 @@ static void ipr_free_all_resources(struc
> iounmap(ioa_cfg->hdw_dma_regs);
> pci_release_regions(pdev);
> ipr_free_mem(ioa_cfg);
> + percpu_ref_exit(&ioa_cfg->ioa_usage_counter->ref);
> + kfree(ioa_cfg->ioa_usage_counter);
> scsi_host_put(ioa_cfg->host);
> pci_disable_device(pdev);
> LEAVE;
> @@ -10146,11 +10192,21 @@ static int ipr_probe_ioa(struct pci_dev
> goto out_scsi_host_put;
> }
>
> + ioa_cfg->ioa_usage_counter = kzalloc(sizeof(*ioa_cfg->ioa_usage_counter),
> + GFP_KERNEL);
> +
> + if (!ioa_cfg->ioa_usage_counter) {
> + dev_err(&pdev->dev, "Failed to allocate ioa_usage_counter\n");
> + rc = -ENOMEM;
> + goto out_scsi_host_put;
> + }
> +
> /* set SIS 32 or SIS 64 */
> ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
> ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
> ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
> ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
> + ioa_cfg->ioa_usage_counter->ioa_cfg = ioa_cfg;
>
> if (ipr_transop_timeout)
> ioa_cfg->transop_timeout = ipr_transop_timeout;
> @@ -10163,13 +10219,24 @@ static int ipr_probe_ioa(struct pci_dev
>
> ipr_init_ioa_cfg(ioa_cfg, host, pdev);
>
> + rc = percpu_ref_init(&ioa_cfg->ioa_usage_counter->ref,
> + ipr_ioa_usage_counter_release,
> + 0, GFP_KERNEL);
> +
> + if (rc) {
> + dev_err(&pdev->dev,
> + "Couldn't initialize percpu counter\n");
> + kfree(ioa_cfg->ioa_usage_counter);
> + goto out_scsi_host_put;
> + }
> +
> ipr_regs_pci = pci_resource_start(pdev, 0);
>
> rc = pci_request_regions(pdev, IPR_NAME);
> if (rc < 0) {
> dev_err(&pdev->dev,
> "Couldn't register memory range of registers\n");
> - goto out_scsi_host_put;
> + goto out_percpu_ref;
> }
>
> rc = pci_enable_device(pdev);
> @@ -10373,18 +10440,18 @@ static int ipr_probe_ioa(struct pci_dev
> (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E &&
> !ioa_cfg->revid)) {
> ioa_cfg->needs_warm_reset = 1;
> ioa_cfg->reset = ipr_reset_slot_reset;
> -
> - ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
> - WQ_MEM_RECLAIM, host->host_no);
> -
> - if (!ioa_cfg->reset_work_q) {
> - dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
> - rc = -ENOMEM;
> - goto out_free_sata_work_q;
> - }
> } else
> ioa_cfg->reset = ipr_reset_start_bist;
>
> + ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
> + WQ_MEM_RECLAIM, host->host_no);
> +
> + if (!ioa_cfg->reset_work_q) {
> + dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
> + rc = -ENOMEM;
> + goto out_free_sata_work_q;
> + }
> +
> spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
> list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
> spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
> @@ -10411,6 +10478,8 @@ out_disable:
> pci_disable_device(pdev);
> out_release_regions:
> pci_release_regions(pdev);
> +out_percpu_ref:
> + percpu_ref_exit(&ioa_cfg->ioa_usage_counter->ref);
> out_scsi_host_put:
> scsi_host_put(host);
> goto out;
> _
>
>
>
--
Gabriel Krisman Bertazi
------------------------------------------------------------------------------
_______________________________________________
Iprdd-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/iprdd-devel