author		Ralph Campbell <ralph.campbell@qlogic.com>	2007-04-27 11:11:11 -0700
committer	Roland Dreier <rolandd@cisco.com>		2007-04-30 17:30:27 -0700
commit		35ff032e65ab5cc03bbba46cefece7376c7c562f (patch)
tree		76bfb852b0ccf0b095b39b4a4226da087d51dbdf /drivers/infiniband/hw/ipath/ipath_rc.c
parent		b9099ff63c75216d6ca10bce5a1abcd9293c27e6 (diff)
IB/ipath: Don't call spin_lock_irq() from interrupt context
This patch fixes the problem reported by Bernd Schubert <bs@q-leap.de>
with kernel debug options enabled:
BUG: at kernel/lockdep.c:1860 trace_hardirqs_on()
This was caused by calling spin_lock_irq()/spin_unlock_irq() from
interrupt context: spin_unlock_irq() unconditionally re-enables local
interrupts, which is wrong if they were already disabled when the
function was entered. Fix all the code paths that might be called from
interrupt context to use spin_lock_irqsave()/spin_unlock_irqrestore(),
as illustrated in the sketch below.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
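
As background, here is a minimal sketch of the locking pitfall this
patch fixes. The demo_lock and demo_* names are hypothetical and exist
only to illustrate the bug class; they are not part of the patch.

	/*
	 * Hypothetical example (not from this patch): why the
	 * unconditional _irq lock variants are unsafe in code that may
	 * run in interrupt context.
	 */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Buggy if reachable from interrupt context. */
	static void demo_unsafe(void)
	{
		spin_lock_irq(&demo_lock);	/* disables local interrupts */
		/* ... critical section ... */
		spin_unlock_irq(&demo_lock);	/*
						 * Unconditionally re-enables
						 * interrupts -- wrong if the
						 * caller (e.g. an IRQ handler)
						 * had them disabled; lockdep
						 * reports this through
						 * trace_hardirqs_on().
						 */
	}

	/* Safe from any context. */
	static void demo_safe(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* saves prior IRQ state */
		/* ... critical section ... */
		spin_unlock_irqrestore(&demo_lock, flags); /* restores it exactly */
	}

This save/restore transformation is exactly what the patch applies
throughout ipath_rc.c below.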
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_rc.c	18
1 file changed, 11 insertions, 7 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index b4b88d0..e3e5332 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -587,6 +587,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	u32 hwords;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
+	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
@@ -640,11 +641,11 @@ static void send_rc_ack(struct ipath_qp *qp)
 		dev->n_rc_qacks++;
 
 queue_ack:
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	/* Call ipath_do_rc_send() in another thread. */
 	tasklet_hi_schedule(&qp->s_task);
@@ -1294,6 +1295,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	struct ipath_ack_entry *e;
 	u8 i, prev;
 	int old_req;
+	unsigned long flags;
 
 	if (diff > 0) {
 		/*
@@ -1327,7 +1329,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	psn &= IPATH_PSN_MASK;
 	e = NULL;
 	old_req = 1;
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
 			old_req = 0;
@@ -1425,7 +1427,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		 * after all the previous RDMA reads and atomics.
 		 */
		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irq(&qp->s_lock);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->r_psn - 1;
 			goto send_ack;
@@ -1443,7 +1445,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
 	return 1;
 
@@ -1453,10 +1455,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-	spin_lock_irq(&qp->s_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->state = IB_QPS_ERR;
 	ipath_error_qp(qp, err);
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**