[openib-general] [PATCH 4/4] SA path record caching

Sean Hefty sean.hefty at intel.com
Wed Jan 25 11:55:59 PST 2006


Modify the CMA to use the local SA database for path record lookups.  Path
records are now read synchronously from the ib_local_sa cache rather than
queried asynchronously from the SA, and completion events are delivered
through a single generic work handler.
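
The shape of the change, condensed from the diff below (comp_mask here
simply abbreviates the IB_SA_PATH_REC_* component flags that appear in the
removed code):

	/* before: asynchronous SA query; the result arrives in a callback */
	id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device,
				id_priv->id.port_num, &path_rec, comp_mask,
				timeout_ms, GFP_KERNEL, cma_query_handler,
				id_priv, &id_priv->query);

	/* after: synchronous lookup against the ib_local_sa cache */
	ret = ib_get_path_rec(id_priv->id.device, id_priv->id.port_num,
			      ib_addr_get_sgid(addr), ib_addr_get_dgid(addr),
			      ib_addr_get_pkey(addr), route->path_rec);

Since the cache lookup completes inline there is nothing left to cancel, so
cma_cancel_route() goes away.  The ROUTE_RESOLVED event is still reported
from process context by queueing a cma_work item on rdma_wq, and the same
cma_work_handler() now also serves the loopback address resolution path.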

Signed-off-by: Sean Hefty <sean.hefty at intel.com>
Index: core/cma.c
===================================================================
--- core/cma.c	(revision 5115)
+++ core/cma.c	(working copy)
@@ -34,7 +34,7 @@
 #include <rdma/rdma_cm.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
-#include <rdma/ib_sa.h>
+#include <rdma/ib_local_sa.h>
 
 MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("Generic RDMA CM Agent");
@@ -115,6 +115,9 @@ struct rdma_id_private {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
+	enum cma_state		old_state;
+	enum cma_state		new_state;
+	struct rdma_cm_event	event;
 };
 
 union cma_ip_addr {
@@ -548,17 +551,6 @@ static void cma_cancel_addr(struct rdma_
 	}
 }
 
-static void cma_cancel_route(struct rdma_id_private *id_priv)
-{
-	switch (id_priv->id.device->node_type) {
-	case IB_NODE_CA:
-		ib_sa_cancel_query(id_priv->query_id, id_priv->query);
-		break;
-	default:
-		break;
-	}
-}
-
 static inline int cma_internal_listen(struct rdma_id_private *id_priv)
 {
 	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
@@ -610,9 +602,6 @@ static void cma_cancel_operation(struct 
 	case CMA_ADDR_QUERY:
 		cma_cancel_addr(id_priv);
 		break;
-	case CMA_ROUTE_QUERY:
-		cma_cancel_route(id_priv);
-		break;
 	case CMA_LISTEN:
 		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
 		    !id_priv->cma_dev)
@@ -1019,65 +1008,65 @@ err:
 };
 EXPORT_SYMBOL(rdma_listen);
 
-static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
-			      void *context)
+static void cma_work_handler(void *data)
 {
-	struct rdma_id_private *id_priv = context;
-	struct rdma_route *route = &id_priv->id.route;
-	enum rdma_cm_event_type event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+	struct cma_work *work = data;
+	struct rdma_id_private *id_priv = work->id;
+	int destroy = 0;
 
 	atomic_inc(&id_priv->dev_remove);
-	if (!status) {
-		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
-		if (route->path_rec) {
-			route->num_paths = 1;
-			*route->path_rec = *path_rec;
-			if (!cma_comp_exch(id_priv, CMA_ROUTE_QUERY,
-						    CMA_ROUTE_RESOLVED)) {
-				kfree(route->path_rec);
-				goto out;
-			}
-		} else
-			status = -ENOMEM;
-	}
-
-	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED))
-			goto out;
-		event = RDMA_CM_EVENT_ROUTE_ERROR;
-	}
+	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+		goto out;
 
-	if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
-		cma_deref_id(id_priv);
-		rdma_destroy_id(&id_priv->id);
-		return;
+		destroy = 1;
 	}
 out:
 	cma_release_remove(id_priv);
 	cma_deref_id(id_priv);
+	if (destroy)
+		rdma_destroy_id(&id_priv->id);
+	kfree(work);
 }
 
 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 {
+	struct rdma_route *route = &id_priv->id.route;
 	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
-	struct ib_sa_path_rec path_rec;
+	struct cma_work *work;
+	int ret;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+	if (!route->path_rec) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	ret = ib_get_path_rec(id_priv->id.device, id_priv->id.port_num,
+			      ib_addr_get_sgid(addr), ib_addr_get_dgid(addr),
+			      ib_addr_get_pkey(addr), route->path_rec);
+	if (ret)
+		goto err2;
 
-	memset(&path_rec, 0, sizeof path_rec);
-	path_rec.sgid = *ib_addr_get_sgid(addr);
-	path_rec.dgid = *ib_addr_get_dgid(addr);
-	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
-	path_rec.numb_path = 1;
-
-	id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device,
-				id_priv->id.port_num, &path_rec,
-				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
-				timeout_ms, GFP_KERNEL,
-				cma_query_handler, id_priv, &id_priv->query);
-	
-	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
+	route->num_paths = 1;
+	work->id = id_priv;
+	INIT_WORK(&work->work, cma_work_handler, work);
+	work->old_state = CMA_ROUTE_QUERY;
+	work->new_state = CMA_ROUTE_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+	queue_work(rdma_wq, &work->work);
+	return 0;
+err2:
+	kfree(route->path_rec);
+	route->path_rec = NULL;
+err1:
+	kfree(work);
+	return ret;
 }
 
 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
@@ -1179,29 +1168,6 @@ out:
 	cma_deref_id(id_priv);
 }
 
-static void loopback_addr_handler(void *data)
-{
-	struct cma_work *work = data;
-	struct rdma_id_private *id_priv = work->id;
-
-	kfree(work);
-	atomic_inc(&id_priv->dev_remove);
-
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
-		goto out;
-
-	if (cma_notify_user(id_priv, RDMA_CM_EVENT_ADDR_RESOLVED, 0, NULL, 0)) {
-		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
-		cma_deref_id(id_priv);
-		rdma_destroy_id(&id_priv->id);
-		return;
-	}
-out:
-	cma_release_remove(id_priv);
-	cma_deref_id(id_priv);
-}
-
 static int cma_resolve_loopback(struct rdma_id_private *id_priv,
 				struct sockaddr *src_addr, enum cma_state state)
 {
@@ -1209,7 +1175,7 @@ static int cma_resolve_loopback(struct r
 	struct rdma_dev_addr *dev_addr;
 	int ret;
 
-	work = kmalloc(sizeof *work, GFP_KERNEL);
+	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
 		return -ENOMEM;
 
@@ -1226,7 +1192,10 @@ static int cma_resolve_loopback(struct r
 	}
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, loopback_addr_handler, work);
+	INIT_WORK(&work->work, cma_work_handler, work);
+	work->old_state = CMA_ADDR_QUERY;
+	work->new_state = CMA_ADDR_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(rdma_wq, &work->work);
 	return 0;
 err:
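
A usage note, as an illustrative sketch rather than part of the patch:
rdma_cm consumers still see the same asynchronous interface.
rdma_resolve_route() returns immediately and completion is reported through
the id's event handler from the rdma_wq workqueue; the one visible
difference appears to be that a failed cache lookup surfaces as an error
return from rdma_resolve_route() itself instead of an
RDMA_CM_EVENT_ROUTE_ERROR event.  A hypothetical consumer handler
(my_cma_handler is not from this patch) would be unchanged:

	static int my_cma_handler(struct rdma_cm_id *id,
				  struct rdma_cm_event *event)
	{
		switch (event->event) {
		case RDMA_CM_EVENT_ADDR_RESOLVED:
			/* kick off route resolution; completion is async */
			return rdma_resolve_route(id, 2000);
		case RDMA_CM_EVENT_ROUTE_RESOLVED:
			/* id->route.path_rec now holds the cached record */
			break;
		default:
			break;
		}
		return 0;
	}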