It does seem we can simplify mthca_cq in a slightly different way.
mthca_cq_clean() doesn't need to take a CQ reference, because we know
the CQ can't go away before all associated QPs are gone, and at least
one QP will stay around until mthca_cq_clean() returns.
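
With that, the refcount can become a plain int protected by cq->lock,
and the only waiter left is mthca_free_cq().  The teardown there reduces
to roughly this (as in the patch below):

        spin_lock_irq(&cq->lock);
        --cq->refcount;                 /* drop the reference set at init */
        spin_unlock_irq(&cq->lock);

        /* wait until mthca_cq_event() has dropped any reference it took */
        wait_event(cq->wait, !get_cq_refcount(cq));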

So the patch below is both a fix and a decent cleanup:

--- infiniband/hw/mthca/mthca_provider.h        (revision 6945)
+++ infiniband/hw/mthca/mthca_provider.h        (working copy)
@@ -197,7 +197,7 @@ struct mthca_cq_resize {
 struct mthca_cq {
        struct ib_cq            ibcq;
        spinlock_t              lock;
-       atomic_t                refcount;
+       int                     refcount;
        int                     cqn;
        u32                     cons_index;
        struct mthca_cq_buf     buf;
--- infiniband/hw/mthca/mthca_dev.h     (revision 6945)
+++ infiniband/hw/mthca/mthca_dev.h     (working copy)
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
                    enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                    struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
--- infiniband/hw/mthca/mthca_cq.c      (revision 6945)
+++ infiniband/hw/mthca/mthca_cq.c      (working copy)
@@ -234,14 +234,19 @@ void mthca_cq_event(struct mthca_dev *de
 {
        struct mthca_cq *cq;
        struct ib_event event;
+       unsigned long flags;
 
-       spin_lock(&dev->cq_table.lock);
+       spin_lock_irqsave(&dev->cq_table.lock, flags);
 
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
 
-       if (cq)
-               atomic_inc(&cq->refcount);
-       spin_unlock(&dev->cq_table.lock);
+       if (cq) {
+               spin_lock(&cq->lock);
+               ++cq->refcount;
+               spin_unlock(&cq->lock);
+       }
+
+       spin_unlock_irqrestore(&dev->cq_table.lock, flags);
 
        if (!cq) {
                mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
@@ -254,8 +259,10 @@ void mthca_cq_event(struct mthca_dev *de
        if (cq->ibcq.event_handler)
                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-       if (atomic_dec_and_test(&cq->refcount))
+       spin_lock_irqsave(&cq->lock, flags);
+       if (!--cq->refcount)
                wake_up(&cq->wait);
+       spin_unlock_irqrestore(&cq->lock, flags);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +274,13 @@ static inline int is_recv_cqe(struct mth
                return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                    struct mthca_srq *srq)
 {
-       struct mthca_cq *cq;
        struct mthca_cqe *cqe;
        u32 prod_index;
        int nfreed = 0;
 
-       spin_lock_irq(&dev->cq_table.lock);
-       cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-       if (cq)
-               atomic_inc(&cq->refcount);
-       spin_unlock_irq(&dev->cq_table.lock);
-
-       if (!cq)
-               return;
-
        spin_lock_irq(&cq->lock);
 
        /*
@@ -301,7 +298,7 @@ void mthca_cq_clean(struct mthca_dev *de
 
        if (0)
                mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-                         qpn, cqn, cq->cons_index, prod_index);
+                         qpn, cq->cqn, cq->cons_index, prod_index);
 
        /*
         * Now sweep backwards through the CQ, removing CQ entries
@@ -323,10 +320,6 @@ void mthca_cq_clean(struct mthca_dev *de
                cq->cons_index += nfreed;
                update_cons_index(dev, cq, nfreed);
        }
-
-       spin_unlock_irq(&cq->lock);
-       if (atomic_dec_and_test(&cq->refcount))
-               wake_up(&cq->wait);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +814,7 @@ int mthca_init_cq(struct mthca_dev *dev,
        }
 
        spin_lock_init(&cq->lock);
-       atomic_set(&cq->refcount, 1);
+       cq->refcount = 1;
        init_waitqueue_head(&cq->wait);
 
        memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +889,17 @@ err_out:
        return err;
 }
 
+static int get_cq_refcount(struct mthca_cq *cq)
+{
+       int c;
+
+       spin_lock_irq(&cq->lock);
+       c = cq->refcount;
+       spin_unlock_irq(&cq->lock);
+
+       return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
                   struct mthca_cq *cq)
 {
@@ -936,8 +940,11 @@ void mthca_free_cq(struct mthca_dev *dev
        else
                synchronize_irq(dev->pdev->irq);
 
-       atomic_dec(&cq->refcount);
-       wait_event(cq->wait, !atomic_read(&cq->refcount));
+       spin_lock_irq(&cq->lock);
+       --cq->refcount;
+       spin_unlock_irq(&cq->lock);
+
+       wait_event(cq->wait, !get_cq_refcount(cq));
 
        if (cq->is_kernel) {
                mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
--- infiniband/hw/mthca/mthca_qp.c      (revision 6945)
+++ infiniband/hw/mthca/mthca_qp.c      (working copy)
@@ -831,10 +831,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
                mthca_wq_init(&qp->sq);
@@ -1356,10 +1356,10 @@ void mthca_free_qp(struct mthca_dev *dev
         * unref the mem-free tables and free the QPN in our table.
         */
        if (!qp->ibqp.uobject) {
-               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
                mthca_free_memfree(dev, qp);