Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=35ff032e65ab5cc03bbba46cefece7376c7c562f
Commit:     35ff032e65ab5cc03bbba46cefece7376c7c562f
Parent:     b9099ff63c75216d6ca10bce5a1abcd9293c27e6
Author:     Ralph Campbell <[EMAIL PROTECTED]>
AuthorDate: Fri Apr 27 11:11:11 2007 -0700
Committer:  Roland Dreier <[EMAIL PROTECTED]>
CommitDate: Mon Apr 30 17:30:27 2007 -0700

    IB/ipath: Don't call spin_lock_irq() from interrupt context
    
    This patch fixes the problem reported by Bernd Schubert <[EMAIL PROTECTED]>
    with kernel debug options enabled:
    
        BUG: at kernel/lockdep.c:1860 trace_hardirqs_on()
    
    This was caused by using spin_lock_irq()/spin_unlock_irq() from
    interrupt context.  Fix all the places that might be called from
    interrupts to use spin_lock_irqsave()/spin_unlock_irqrestore().
    
    Signed-off-by: Ralph Campbell <[EMAIL PROTECTED]>
    Signed-off-by: Roland Dreier <[EMAIL PROTECTED]>
---
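For context: spin_lock_irq()/spin_unlock_irq() disable and then unconditionally
re-enable local interrupts, so they are only safe when the caller knows
interrupts were enabled on entry.  When the same code can also run from an
interrupt handler (where interrupts are already off), the unlock wrongly
re-enables them, which is what lockdep flags.  Below is a minimal sketch of
the wrong and right patterns; it is not part of this patch, and the lock and
function names (demo_lock, handle_event) are hypothetical.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* May be reached from process context *or* from an interrupt handler. */
static void handle_event(void)
{
	unsigned long flags;

	/*
	 * Wrong here:
	 *
	 *	spin_lock_irq(&demo_lock);
	 *	...
	 *	spin_unlock_irq(&demo_lock);
	 *
	 * because spin_unlock_irq() always re-enables interrupts, even if
	 * they were already disabled before spin_lock_irq() was called.
	 *
	 * Right: save the current interrupt state and restore exactly that
	 * state on unlock.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

The patch below applies the same substitution to each s_lock critical section
that can be entered from interrupt context.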
 drivers/infiniband/hw/ipath/ipath_rc.c |   18 +++++++++++-------
 1 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0..e3e5332 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -587,6 +587,7 @@ static void send_rc_ack(struct ipath_qp *qp)
        u32 hwords;
        struct ipath_ib_header hdr;
        struct ipath_other_headers *ohdr;
+       unsigned long flags;
 
        /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
        if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
@@ -640,11 +641,11 @@ static void send_rc_ack(struct ipath_qp *qp)
        dev->n_rc_qacks++;
 
 queue_ack:
-       spin_lock_irq(&qp->s_lock);
+       spin_lock_irqsave(&qp->s_lock, flags);
        qp->s_flags |= IPATH_S_ACK_PENDING;
        qp->s_nak_state = qp->r_nak_state;
        qp->s_ack_psn = qp->r_ack_psn;
-       spin_unlock_irq(&qp->s_lock);
+       spin_unlock_irqrestore(&qp->s_lock, flags);
 
        /* Call ipath_do_rc_send() in another thread. */
        tasklet_hi_schedule(&qp->s_task);
@@ -1294,6 +1295,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
        struct ipath_ack_entry *e;
        u8 i, prev;
        int old_req;
+       unsigned long flags;
 
        if (diff > 0) {
                /*
@@ -1327,7 +1329,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
        psn &= IPATH_PSN_MASK;
        e = NULL;
        old_req = 1;
-       spin_lock_irq(&qp->s_lock);
+       spin_lock_irqsave(&qp->s_lock, flags);
        for (i = qp->r_head_ack_queue; ; i = prev) {
                if (i == qp->s_tail_ack_queue)
                        old_req = 0;
@@ -1425,7 +1427,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                 * after all the previous RDMA reads and atomics.
                 */
                if (i == qp->r_head_ack_queue) {
-                       spin_unlock_irq(&qp->s_lock);
+                       spin_unlock_irqrestore(&qp->s_lock, flags);
                        qp->r_nak_state = 0;
                        qp->r_ack_psn = qp->r_psn - 1;
                        goto send_ack;
@@ -1443,7 +1445,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
        tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-       spin_unlock_irq(&qp->s_lock);
+       spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
        return 1;
 
@@ -1453,10 +1455,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-       spin_lock_irq(&qp->s_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
        ipath_error_qp(qp, err);
-       spin_unlock_irq(&qp->s_lock);
+       spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**