[ofa-general] Re: [PATCHv3 for-2.6.21] IB/mthca: fix race in QP destroy
Roland Dreier
rdreier at cisco.com
Tue Mar 6 20:03:46 PST 2007
What do you think of something like this, plus merging the async event
and command interface EQs?  The idea is to move the QP's transition to
RESET up to the top of mthca_free_qp(), and to flush the QP's CQEs
while the CQ locks are still held, by splitting out a __mthca_cq_clean()
variant whose callers must already hold cq->lock.
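As an aside for anyone skimming: the split follows the usual kernel
convention where the double-underscore variant does the work and assumes
the caller already holds the lock, while the plain-named wrapper keeps
the old locking behavior for existing call sites.  A minimal sketch with
made-up names (foo and pending are hypothetical, not mthca code):

#include <linux/spinlock.h>

struct foo {
        spinlock_t lock;
        int        pending;
};

/* Caller must hold f->lock. */
static void __foo_clean(struct foo *f)
{
        f->pending = 0;         /* stands in for the real CQE scan */
}

static void foo_clean(struct foo *f)
{
        spin_lock_irq(&f->lock);
        __foo_clean(f);
        spin_unlock_irq(&f->lock);
}

mthca_free_qp() below calls the __ variant directly because
mthca_lock_cqs() has already taken both CQ locks at that point.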
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index efd79ef..6e247e2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -279,15 +279,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
         return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
-                    struct mthca_srq *srq)
+void __mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
+                      struct mthca_srq *srq)
 {
         struct mthca_cqe *cqe;
         u32 prod_index;
         int nfreed = 0;
 
-        spin_lock_irq(&cq->lock);
-
         /*
          * First we need to find the current producer index, so we
          * know where to start cleaning from. It doesn't matter if HW
@@ -325,8 +323,14 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                 cq->cons_index += nfreed;
                 update_cons_index(dev, cq, nfreed);
         }
+}
 
-        spin_unlock_irq(&cq->lock);
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
+                    struct mthca_srq *srq)
+{
+        spin_lock_irq(&cq->lock);
+        __mthca_cq_clean(dev, cq, qpn, srq);
+        spin_unlock_irq(&cq->lock);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index b7e42ef..a786f56 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -505,6 +505,8 @@ void mthca_free_cq(struct mthca_dev *dev,
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
                     enum ib_event_type event_type);
+void __mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
+                      struct mthca_srq *srq);
 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
                     struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 1c6b63a..560b99a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1390,6 +1390,10 @@ void mthca_free_qp(struct mthca_dev *dev,
         struct mthca_cq *send_cq;
         struct mthca_cq *recv_cq;
 
+        if (qp->state != IB_QPS_RESET)
+                mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
+                                NULL, 0, &status);
+
         send_cq = to_mcq(qp->ibqp.send_cq);
         recv_cq = to_mcq(qp->ibqp.recv_cq);
 
@@ -1403,28 +1407,27 @@ void mthca_free_qp(struct mthca_dev *dev,
         mthca_array_clear(&dev->qp_table.qp,
                           qp->qpn & (dev->limits.num_qps - 1));
         --qp->refcount;
+
+        if (!qp->ibqp.uobject) {
+                __mthca_cq_clean(dev, send_cq, qp->qpn,
+                                 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+                if (send_cq != recv_cq)
+                        __mthca_cq_clean(dev, recv_cq, qp->qpn,
+                                         qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+        }
+
         spin_unlock(&dev->qp_table.lock);
 
         mthca_unlock_cqs(send_cq, recv_cq);
 
         wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
-        if (qp->state != IB_QPS_RESET)
-                mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
-                                NULL, 0, &status);
-
         /*
          * If this is a userspace QP, the buffers, MR, CQs and so on
          * will be cleaned up in userspace, so all we have to do is
          * unref the mem-free tables and free the QPN in our table.
          */
         if (!qp->ibqp.uobject) {
-                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
-                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
-                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
-                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
-
                 mthca_free_memfree(dev, qp);
                 mthca_free_wqe_buf(dev, qp);
         }