[openib-general] [PATCH] CM: fix locking order sending a REQ

Sean Hefty mshefty at ichips.intel.com
Thu Jan 20 13:10:44 PST 2005


The following patch fixes the lock acquisition order when sending a REQ: the
service-table insert for peer-to-peer connections is now done under cm.lock
before cm_id_priv->lock is taken, instead of nesting cm.lock inside it.

Signed-off-by: Sean Hefty <sean.hefty at intel.com>
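
For readers skimming the diff, here is a minimal userspace sketch of the
ordering change. It is not the real CM code: pthread mutexes stand in for the
cm.lock and cm_id_priv->lock spinlocks, and the helper bodies are illustrative
comments only. The old path nested cm.lock inside cm_id_priv->lock; the new
path takes cm.lock on its own, before cm_id_priv->lock, and rolls the insert
back after dropping the per-id lock if the post fails.

/*
 * Sketch of the lock-ordering change, NOT the real CM code.
 * pthread mutexes model the cm.lock and cm_id_priv->lock spinlocks.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cm_lock = PTHREAD_MUTEX_INITIALIZER;	/* models cm.lock */
static pthread_mutex_t cm_id_lock = PTHREAD_MUTEX_INITIALIZER;	/* models cm_id_priv->lock */

/* Old path: cm.lock was acquired while cm_id_priv->lock was already held. */
static void send_req_old(int peer_to_peer)
{
	pthread_mutex_lock(&cm_id_lock);
	/* ... post the REQ ... */
	if (peer_to_peer) {
		pthread_mutex_lock(&cm_lock);	/* nested - out of order */
		/* ... insert service entry ... */
		pthread_mutex_unlock(&cm_lock);
	}
	pthread_mutex_unlock(&cm_id_lock);
}

/* New path: cm.lock is taken and released before cm_id_priv->lock. */
static void send_req_new(int peer_to_peer, int send_fails)
{
	if (peer_to_peer) {
		pthread_mutex_lock(&cm_lock);
		/* ... insert service entry ... */
		pthread_mutex_unlock(&cm_lock);
	}

	pthread_mutex_lock(&cm_id_lock);
	/* ... post the REQ ... */
	if (send_fails) {
		pthread_mutex_unlock(&cm_id_lock);
		if (peer_to_peer) {
			pthread_mutex_lock(&cm_lock);
			/* ... undo the insert ... */
			pthread_mutex_unlock(&cm_lock);
		}
		return;
	}
	pthread_mutex_unlock(&cm_id_lock);
}

int main(void)
{
	send_req_old(1);
	send_req_new(1, 0);
	printf("both orderings ran; only the new one keeps the two locks un-nested\n");
	return 0;
}

Doing the insert up front is safe because, as the comment in the patch notes,
received REQs will not match until the id reaches REQ_SENT, so an insert that
is later undone on a send failure is harmless.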

Index: core/cm.c
===================================================================
--- core/cm.c	(revision 1605)
+++ core/cm.c	(working copy)
@@ -703,17 +703,25 @@
 		   struct ib_cm_req_param *param)
 {
 	struct cm_id_private *cm_id_priv;
-	struct cm_msg *msg;
 	struct ib_send_wr *bad_send_wr;
 	struct ib_sa_path_rec *path;
-	unsigned long flags, flags2;
+	unsigned long flags;
 	int ret;
 
 	ret = cm_validate_req_param(param);
 	if (ret)
-		goto out;
+		return ret;
 
+	/* Verify that we're not in timewait. */
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state != IB_CM_IDLE) {
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		ret = -EINVAL;
+		goto out;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
 	cm_id_priv->port = cm_find_port(param->qp->device,
 					&param->primary_path->sgid);
 	if (!cm_id_priv->port) {
@@ -737,35 +745,41 @@
 	path = param->primary_path;
 	cm_set_ah_attr(&cm_id_priv->ah_attr, cm_id_priv->port->port_num,
 		       path->dlid, path->sl, path->slid & 0x7F);
-	ret = cm_alloc_msg(cm_id_priv, &msg);
+	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
 	if (ret)
 		goto out;
 
-	cm_format_req((struct cm_req_msg *)&msg->mad, cm_id_priv, param);
-	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
+	cm_format_req((struct cm_req_msg *)&cm_id_priv->msg->mad,
+		      cm_id_priv, param);
+	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
+	cm_id_priv->msg->sent_state = IB_CM_REQ_SENT;
+
+	/*
+	 * Received REQs won't match until we're in REQ_SENT state.  This
+	 * simplifies error recovery if the send fails.
+	 */
+	if (param->peer_to_peer) {
+		spin_lock_irqsave(&cm.lock, flags);
+		cm_insert_service(cm_id_priv);
+		spin_unlock_irqrestore(&cm.lock, flags);
+	}
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	if (cm_id->state == IB_CM_IDLE)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
-				       &msg->send_wr, &bad_send_wr);
-	else
-		ret = -EINVAL;
+	ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+				&cm_id_priv->msg->send_wr, &bad_send_wr);
 
 	if (ret) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		if (param->peer_to_peer) {
+			spin_lock_irqsave(&cm.lock, flags);
+			rb_erase(&cm_id_priv->service_node, &cm.service_table);
+			spin_unlock_irqrestore(&cm.lock, flags);
+		}
 		cm_free_msg(cm_id_priv->msg);
 		goto out;
 	}
+	BUG_ON(cm_id->state != IB_CM_IDLE);
 	cm_id->state = IB_CM_REQ_SENT;
-	msg->sent_state = IB_CM_REQ_SENT;
-	cm_id_priv->msg = msg;
-
-	if (param->peer_to_peer) {
-		/* todo: locking is out of order - fix me */
-		spin_lock_irqsave(&cm.lock, flags2);
-		cm_insert_service(cm_id_priv);
-		spin_unlock_irqrestore(&cm.lock, flags2);
-	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 out:
 	return ret;


