[ofa-general] [PATCH] IB/ipath - Don't call spin_lock_irq() from interrupt context

Ralph Campbell ralph.campbell at qlogic.com
Fri Apr 27 11:11:11 PDT 2007


This patch fixes a problem reported by Bernd Schubert <bs at q-leap.de>
which shows up when kernel debug options are enabled:

BUG: at kernel/lockdep.c:1860 trace_hardirqs_on()

The routines changed below can be called from interrupt context, so they
must not use spin_lock_irq()/spin_unlock_irq(), which unconditionally
re-enable interrupts on unlock.  They are converted to use
spin_lock_irqsave()/spin_unlock_irqrestore(), which save and restore the
caller's interrupt state instead.
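For context (not part of the patch), here is a minimal sketch of the
locking pattern being applied; the lock and function names below are
purely illustrative, not taken from the ipath driver:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	/* May run in process context or from an interrupt handler. */
	static void example_update(void)
	{
		unsigned long flags;

		/*
		 * spin_lock_irq()/spin_unlock_irq() would re-enable
		 * interrupts unconditionally on unlock, which is wrong
		 * when the caller is already running with interrupts
		 * disabled (e.g. in an interrupt handler) and trips the
		 * lockdep check shown above.
		 */
		spin_lock_irqsave(&example_lock, flags);
		/* ... update state shared with the interrupt path ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

The irqsave variant costs an extra flags word on the stack but is safe
regardless of the context it is called from, which is why it is used here.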

Hopefully, this can be included in OFED 1.2 as well as
going upstream.

Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>

diff -r 97262e873c51 drivers/infiniband/hw/ipath/ipath_rc.c
--- a/drivers/infiniband/hw/ipath/ipath_rc.c	Fri Apr 20 14:39:31 2007 -0700
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c	Fri Apr 20 14:45:03 2007 -0700
@@ -582,6 +582,7 @@ static void send_rc_ack(struct ipath_qp 
 	u32 hwords;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
+	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
@@ -637,11 +638,11 @@ static void send_rc_ack(struct ipath_qp 
 	dev->n_rc_qacks++;
 
 queue_ack:
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	/* Call ipath_do_rc_send() in another thread. */
 	tasklet_hi_schedule(&qp->s_task);
@@ -1292,6 +1293,7 @@ static inline int ipath_rc_rcv_error(str
 	struct ipath_ack_entry *e;
 	u8 i, prev;
 	int old_req;
+	unsigned long flags;
 
 	if (diff > 0) {
 		/*
@@ -1325,7 +1327,7 @@ static inline int ipath_rc_rcv_error(str
 	psn &= IPATH_PSN_MASK;
 	e = NULL;
 	old_req = 1;
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
 			old_req = 0;
@@ -1423,7 +1425,7 @@ static inline int ipath_rc_rcv_error(str
 		 * after all the previous RDMA reads and atomics.
 		 */
 		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irq(&qp->s_lock);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->r_psn - 1;
 			goto send_ack;
@@ -1440,7 +1442,7 @@ static inline int ipath_rc_rcv_error(str
 	tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
 	return 1;
 
@@ -1450,10 +1452,12 @@ send_ack:
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-	spin_lock_irq(&qp->s_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->state = IB_QPS_ERR;
 	ipath_error_qp(qp, err);
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**
