[openib-general] Re: [PATCH] cm refcount race fix

Roland Dreier rdreier at cisco.com
Mon May 8 15:01:56 PDT 2006


    Michael> As a solution, we can decide that cq refcount is protected
    Michael> by cq_table.lock.  What do you say?

Yes, that makes sense.  Like this (locking comments also fixed).

I'll do the QP and SRQ stuff now; it's easy enough (see the sketch
after the patch).
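
To spell out the race for the archives: with an atomic_t, nothing
orders the event path's final wake_up() against the destroy path's
wait_event().  The old code was (annotations mine):

	/* event path (old code) */
	if (atomic_dec_and_test(&cq->refcount))
		/* the destroy path can observe refcount == 0 right
		 * here, finish its wait_event() and free the cq... */
		wake_up(&cq->wait);	/* ...so this touches freed memory */

	/* destroy path (old code) */
	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

Once the count is protected by cq_table.lock, the waiter can only
observe zero after the waker has dropped the lock, which is after the
wake_up() has finished.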

--- infiniband/hw/mthca/mthca_provider.h	(revision 6999)
+++ infiniband/hw/mthca/mthca_provider.h	(working copy)
@@ -139,11 +139,12 @@ struct mthca_ah {
  * a qp may be locked, with the send cq locked first.  No other
  * nesting should be done.
  *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has a ref count, protected by the
+ * corresponding table lock.  The pointer from the cq/qp_table to the
+ * struct counts as one reference.  This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference.  Access because of a completion
+ * being polled does need a reference.
  *
  * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
  * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
  * - decrement ref count; if zero, wake up waiters
  *
  * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
  * - wait_event until ref count is zero
  *
  * It is the consumer's responsibility to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
 struct mthca_cq {
 	struct ib_cq		ibcq;
 	spinlock_t		lock;
-	atomic_t		refcount;
+	int			refcount;
 	int			cqn;
 	u32			cons_index;
 	struct mthca_cq_buf	buf;
--- infiniband/hw/mthca/mthca_dev.h	(revision 6999)
+++ infiniband/hw/mthca/mthca_dev.h	(working copy)
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
--- infiniband/hw/mthca/mthca_cq.c	(revision 6999)
+++ infiniband/hw/mthca/mthca_cq.c	(working copy)
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *de
 	spin_lock(&dev->cq_table.lock);
 
 	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
 	if (cq)
-		atomic_inc(&cq->refcount);
+		++cq->refcount;
+
 	spin_unlock(&dev->cq_table.lock);
 
 	if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *de
 	if (cq->ibcq.event_handler)
 		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-	if (atomic_dec_and_test(&cq->refcount))
+	spin_lock(&dev->cq_table.lock);
+	if (!--cq->refcount)
 		wake_up(&cq->wait);
+	spin_unlock(&dev->cq_table.lock);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mth
 		return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq)
 {
-	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
 	u32 prod_index;
 	int nfreed = 0;
 
-	spin_lock_irq(&dev->cq_table.lock);
-	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-	spin_unlock_irq(&dev->cq_table.lock);
-
-	if (!cq)
-		return;
-
 	spin_lock_irq(&cq->lock);
 
 	/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *de
 
 	if (0)
 		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-			  qpn, cqn, cq->cons_index, prod_index);
+			  qpn, cq->cqn, cq->cons_index, prod_index);
 
 	/*
 	 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *de
 	}
 
 	spin_unlock_irq(&cq->lock);
-	if (atomic_dec_and_test(&cq->refcount))
-		wake_up(&cq->wait);
 }
 
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev,
 	}
 
 	spin_lock_init(&cq->lock);
-	atomic_set(&cq->refcount, 1);
+	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);
 
 	memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
 	return err;
 }
 
+static int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+	int c;
+
+	spin_lock_irq(&dev->cq_table.lock);
+	c = cq->refcount;
+	spin_unlock_irq(&dev->cq_table.lock);
+
+	return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq)
 {
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev
 	spin_lock_irq(&dev->cq_table.lock);
 	mthca_array_clear(&dev->cq_table.cq,
 			  cq->cqn & (dev->limits.num_cqs - 1));
+	--cq->refcount;
 	spin_unlock_irq(&dev->cq_table.lock);
 
 	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev
 	else
 		synchronize_irq(dev->pdev->irq);
 
-	atomic_dec(&cq->refcount);
-	wait_event(cq->wait, !atomic_read(&cq->refcount));
+	wait_event(cq->wait, !get_cq_refcount(dev, cq));
 
 	if (cq->is_kernel) {
 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
--- infiniband/hw/mthca/mthca_qp.c	(revision 6999)
+++ infiniband/hw/mthca/mthca_qp.c	(working copy)
@@ -831,10 +831,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_wq_init(&qp->sq);
@@ -1356,10 +1356,10 @@ void mthca_free_qp(struct mthca_dev *dev
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 		mthca_free_memfree(dev, qp);
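
The QP and SRQ sides should end up looking just the same.  For the QP
case, roughly this (untested sketch with hypothetical helper names,
not the actual patch):

	static struct mthca_qp *get_qp_ref(struct mthca_dev *dev, u32 qpn)
	{
		struct mthca_qp *qp;

		spin_lock(&dev->qp_table.lock);
		qp = mthca_array_get(&dev->qp_table.qp,
				     qpn & (dev->limits.num_qps - 1));
		if (qp)
			++qp->refcount;	/* plain int: qp_table.lock serializes */
		spin_unlock(&dev->qp_table.lock);

		return qp;
	}

	static void put_qp_ref(struct mthca_dev *dev, struct mthca_qp *qp)
	{
		spin_lock(&dev->qp_table.lock);
		if (!--qp->refcount)
			wake_up(&qp->wait);	/* waiter rereads under the lock */
		spin_unlock(&dev->qp_table.lock);
	}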


