[openib-general] [PATCH] cma: protect against adding device during destruction

Sean Hefty sean.hefty@intel.com
Thu Aug 31 10:03:33 PDT 2006


Can you see if this patch helps any?

This closes a window where address resolution can attach an rdma_cm_id
to a device during destruction of the rdma_cm_id.  This can result in
the rdma_cm_id remaining in the device list after its memory has been
freed.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
---
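For reference, here is a minimal userspace sketch (not kernel code, and not
part of the patch) of the locking pattern the change applies: one mutex
serializes the address-resolution handler's device attach against the destroy
path's check-and-detach, and the state leaves ADDR_QUERY under that mutex so a
handler that arrives after destroy has begun cannot re-attach.  All of the
names below (struct fake_id, has_dev, comp_exch, ...) are made up for
illustration; the real code also uses a separate per-id lock for its state
transitions, which this sketch folds into the single mutex for brevity.
Build with something like: cc -pthread sketch.c

#include <pthread.h>
#include <stdio.h>

enum id_state { ADDR_QUERY, ADDR_RESOLVED, DESTROYING };

struct fake_id {
	enum id_state state;	/* stands in for the rdma_cm_id state   */
	int has_dev;		/* stands in for id_priv->cma_dev       */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Compare-and-exchange on the state, in the spirit of cma_comp_exch(). */
static int comp_exch(struct fake_id *id, enum id_state old, enum id_state new)
{
	int ok = (id->state == old);

	if (ok)
		id->state = new;
	return ok;
}

/* Address-resolution handler: may attach only while still in ADDR_QUERY. */
static void *addr_handler(void *arg)
{
	struct fake_id *id = arg;

	pthread_mutex_lock(&lock);
	if (comp_exch(id, ADDR_QUERY, ADDR_RESOLVED) && !id->has_dev)
		id->has_dev = 1;	/* "acquire dev"; destroy is blocked here */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Destroy path: mark DESTROYING and detach under the same mutex. */
static void *destroy_id(void *arg)
{
	struct fake_id *id = arg;

	pthread_mutex_lock(&lock);
	id->state = DESTROYING;		/* any later comp_exch() fails  */
	if (id->has_dev)
		id->has_dev = 0;	/* "detach" while the handler waits */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct fake_id id = { ADDR_QUERY, 0 };
	pthread_t a, b;

	pthread_create(&a, NULL, addr_handler, &id);
	pthread_create(&b, NULL, destroy_id, &id);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* In every interleaving the id ends up detached (has_dev == 0).
	 * Without the mutex and the state check it could remain attached
	 * after destroy ran, which is the window the patch closes. */
	printf("state=%d has_dev=%d\n", id.state, id.has_dev);
	return 0;
}
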
Index: cma.c
===================================================================
--- cma.c	(revision 9192)
+++ cma.c	(working copy)
@@ -283,7 +284,6 @@ static int cma_acquire_ib_dev(struct rdm
 
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 
-	mutex_lock(&lock);
 	list_for_each_entry(cma_dev, &dev_list, list) {
 		ret = ib_find_cached_gid(cma_dev->device, &gid,
 					 &id_priv->id.port_num, NULL);
@@ -292,7 +292,6 @@ static int cma_acquire_ib_dev(struct rdm
 			break;
 		}
 	}
-	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -781,7 +780,9 @@ void rdma_destroy_id(struct rdma_cm_id *
 	state = cma_exch(id_priv, CMA_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
+	mutex_lock(&lock);
 	if (id_priv->cma_dev) {
+		mutex_unlock(&lock);
 		switch (rdma_node_get_transport(id->device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 	 		if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -793,8 +794,8 @@ void rdma_destroy_id(struct rdma_cm_id *
 		cma_leave_mc_groups(id_priv);
 	  	mutex_lock(&lock);
 		cma_detach_from_dev(id_priv);
-		mutex_unlock(&lock);
 	}
+	mutex_unlock(&lock);
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
@@ -1511,16 +1512,26 @@ static void addr_handler(int status, str
 	enum rdma_cm_event_type event;
 
 	atomic_inc(&id_priv->dev_remove);
-	if (!id_priv->cma_dev && !status)
+
+	/*
+	 * Grab mutex to block rdma_destroy_id() from removing the device while
+	 * we're trying to acquire it.
+	 */
+	mutex_lock(&lock);
+	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
+		mutex_unlock(&lock);
+		goto out;
+	}
+
+	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
+	mutex_unlock(&lock);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
 			goto out;
 		event = RDMA_CM_EVENT_ADDR_ERROR;
 	} else {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
-			goto out;
 		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
 		       ip_addr_size(src_addr));
 		event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1747,8 +1758,11 @@ int rdma_bind_addr(struct rdma_cm_id *id
 
 	if (!cma_any_addr(addr)) {
 		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
-		if (!ret)
+		if (!ret) {
+			mutex_lock(&lock);
 			ret = cma_acquire_dev(id_priv);
+			mutex_unlock(&lock);
+		}
 		if (ret)
 			goto err;
 	}