[openib-general] [RFC/PATCH v2] librdmacm: add RDMA_PS_IPOIB port space

Or Gerlitz ogerlitz at voltaire.com
Tue Jan 23 07:40:54 PST 2007


Add an IPoIB port space (RDMA_PS_IPOIB) to librdmacm. Its semantics are similar
to those of RDMA_PS_UDP, and RDMA_PS_IPOIB IDs allow interoperability with
IPoIB for some traffic patterns.

For RDMA_PS_UDP and RDMA_PS_IPOIB IDs, the qkey is provided by the kernel in the
ADDR_RESOLVED and CONNECT_REQUEST events and is stored by the library in struct
cma_id_private. The library later uses this qkey when it is called to create a UD QP.
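
For illustration only (this sketch is not part of the patch), an active-side application
using the new port space might look as follows. The peer address, CQ sizing and error
handling are placeholders, and a real application would also resolve a route and connect
before posting sends:

#include <string.h>
#include <sys/socket.h>
#include <rdma/rdma_cma.h>

static struct ibv_qp *create_ipoib_ud_qp(struct rdma_event_channel *channel,
					 struct sockaddr *peer)
{
	struct rdma_cm_id *id;
	struct rdma_cm_event *event;
	struct ibv_qp_init_attr attr;
	struct ibv_pd *pd;
	struct ibv_cq *cq;

	rdma_create_id(channel, &id, NULL, RDMA_PS_IPOIB);
	rdma_resolve_addr(id, NULL, peer, 2000);

	/* On ADDR_RESOLVED the kernel reports the qkey and the library
	 * stores it in struct cma_id_private. */
	rdma_get_cm_event(channel, &event);
	rdma_ack_cm_event(event);

	pd = ibv_alloc_pd(id->verbs);
	cq = ibv_create_cq(id->verbs, 2, NULL, NULL, 0);

	memset(&attr, 0, sizeof attr);
	attr.qp_type = IBV_QPT_UD;
	attr.send_cq = attr.recv_cq = cq;
	attr.cap.max_send_wr = attr.cap.max_recv_wr = 1;
	attr.cap.max_send_sge = attr.cap.max_recv_sge = 1;

	/* ucma_init_ud_qp() now programs the stored qkey instead of the
	 * removed RDMA_UD_QKEY constant. */
	rdma_create_qp(id, pd, &attr);
	return id->qp;
}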

The udaddy test program was enhanced to work in either port space.
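For example, with the patched udaddy the IPoIB port space can be selected on both sides
with the new -p flag, which is parsed via strtol(optarg, NULL, 0) and therefore accepts
the hex values printed in the usage text:

	udaddy -p 0x2                      (passive side)
	udaddy -s <server_address> -p 0x2  (active side)

Omitting -p keeps the previous RDMA_PS_UDP behavior.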

Signed-off-by: Or Gerlitz <ogerlitz at voltaire.com>

Index: librdmacm/src/cma.c
===================================================================
--- librdmacm.orig/src/cma.c	2007-01-22 21:21:37.000000000 +0200
+++ librdmacm/src/cma.c	2007-01-23 13:57:48.000000000 +0200
@@ -116,6 +116,7 @@ struct cma_id_private {
 	pthread_mutex_t	  mut;
 	uint32_t	  handle;
 	struct cma_multicast *mc_list;
+	uint32_t	  qkey;
 };

 struct cma_multicast {
@@ -687,7 +688,7 @@ static int ucma_init_ud_qp(struct cma_id

 	qp_attr.port_num = id_priv->id.port_num;
 	qp_attr.qp_state = IBV_QPS_INIT;
-	qp_attr.qkey = RDMA_UD_QKEY;
+	qp_attr.qkey = id_priv->qkey;
 	ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 					  IBV_QP_PORT | IBV_QP_QKEY);
 	if (ret)
@@ -718,7 +719,7 @@ int rdma_create_qp(struct rdma_cm_id *id
 	if (!qp)
 		return -ENOMEM;

-	if (id->ps == RDMA_PS_UDP)
+	if (id->ps == RDMA_PS_UDP || id->ps == RDMA_PS_IPOIB)
 		ret = ucma_init_ud_qp(id_priv, qp);
 	else
 		ret = ucma_init_ib_qp(id_priv, qp);
@@ -809,7 +810,7 @@ int rdma_accept(struct rdma_cm_id *id, s
 	void *msg;
 	int ret, size;

-	if (id->ps != RDMA_PS_UDP) {
+	if (id->ps != RDMA_PS_UDP && id->ps != RDMA_PS_IPOIB) {
 		ret = ucma_modify_qp_rtr(id);
 		if (ret)
 			return ret;
@@ -1169,6 +1170,7 @@ int rdma_get_cm_event(struct rdma_event_
 	struct ucma_abi_get_event *cmd;
 	struct cma_event *evt;
 	void *msg;
+	struct cma_id_private *id_priv;
 	int ret, size;

 	ret = cma_dev_cnt ? 0 : ucma_init();
@@ -1197,8 +1199,11 @@ retry:
 		evt->id_priv = (void *) (uintptr_t) resp->uid;
 		evt->event.id = &evt->id_priv->id;
 		evt->event.status = ucma_query_route(&evt->id_priv->id);
+		id_priv = evt->id_priv;
 		if (evt->event.status)
 			evt->event.event = RDMA_CM_EVENT_ADDR_ERROR;
+		else if (id_priv->id.ps == RDMA_PS_UDP || id_priv->id.ps == RDMA_PS_IPOIB)
+			id_priv->qkey = resp->param.ud.qkey;
 		break;
 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
 		evt->id_priv = (void *) (uintptr_t) resp->uid;
@@ -1211,12 +1216,16 @@ retry:
 		evt->id_priv = (void *) (uintptr_t) resp->uid;
 		if (evt->id_priv->id.ps == RDMA_PS_TCP)
 			ucma_copy_conn_event(evt, &resp->param.conn);
-		else
+		else
 			ucma_copy_ud_event(evt, &resp->param.ud);

 		ret = ucma_process_conn_req(evt, resp->id);
 		if (ret)
 			goto retry;
+
+		id_priv = container_of(evt->event.id, struct cma_id_private, id);
+		if (id_priv->id.ps == RDMA_PS_UDP || id_priv->id.ps == RDMA_PS_IPOIB)
+			id_priv->qkey = resp->param.ud.qkey;
 		break;
 	case RDMA_CM_EVENT_CONNECT_RESPONSE:
 		evt->id_priv = (void *) (uintptr_t) resp->uid;
@@ -1233,7 +1242,8 @@ retry:
 	case RDMA_CM_EVENT_ESTABLISHED:
 		evt->id_priv = (void *) (uintptr_t) resp->uid;
 		evt->event.id = &evt->id_priv->id;
-		if (evt->id_priv->id.ps == RDMA_PS_UDP) {
+		id_priv = evt->id_priv;
+		if (id_priv->id.ps == RDMA_PS_UDP || id_priv->id.ps == RDMA_PS_IPOIB) {
 			ucma_copy_ud_event(evt, &resp->param.ud);
 			break;
 		}
Index: librdmacm/examples/udaddy.c
===================================================================
--- librdmacm.orig/examples/udaddy.c	2007-01-22 21:19:52.000000000 +0200
+++ librdmacm/examples/udaddy.c	2007-01-23 15:50:48.000000000 +0200
@@ -76,6 +76,7 @@ static int message_size = 100;
 static int message_count = 10;
 static char *dst_addr;
 static char *src_addr;
+static enum rdma_port_space port_space = RDMA_PS_UDP;

 static int create_message(struct cmatest_node *node)
 {
@@ -253,7 +254,7 @@ err:
 	return ret;
 }

-static int connect_handler(struct rdma_cm_id *cma_id)
+static int connect_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	struct cmatest_node *node;
 	struct rdma_conn_param conn_param;
@@ -272,6 +273,7 @@ static int connect_handler(struct rdma_c
 	if (ret)
 		goto err2;

+	node->remote_qkey = event->param.ud.qkey;
 	ret = post_recvs(node);
 	if (ret)
 		goto err2;
@@ -327,7 +329,7 @@ static int cma_handler(struct rdma_cm_id
 		ret = route_handler(cma_id->context);
 		break;
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
-		ret = connect_handler(cma_id);
+		ret = connect_handler(cma_id, event);
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		ret = resolved_handler(cma_id->context, event);
@@ -393,7 +395,7 @@ static int alloc_nodes(void)
 		if (dst_addr) {
 			ret = rdma_create_id(test.channel,
 					     &test.nodes[i].cma_id,
-					     &test.nodes[i], RDMA_PS_UDP);
+					     &test.nodes[i], port_space);
 			if (ret)
 				goto err;
 		}
@@ -420,7 +422,7 @@ static void create_reply_ah(struct cmate
 	node->ah = ibv_create_ah_from_wc(node->pd, wc, node->mem,
 					 node->cma_id->port_num);
 	node->remote_qpn = ntohl(wc->imm_data);
-	node->remote_qkey = RDMA_UD_QKEY;
+	/* node->remote_qkey was already set while handling the CONNECT_REQUEST event */
 }

 static int poll_cqs(void)
@@ -489,7 +491,7 @@ static int run_server(void)
 	int i, ret;

 	printf("udaddy: starting server\n");
-	ret = rdma_create_id(test.channel, &listen_id, &test, RDMA_PS_UDP);
+	ret = rdma_create_id(test.channel, &listen_id, &test, port_space);
 	if (ret) {
 		printf("udaddy: listen request failed\n");
 		return ret;
@@ -595,7 +597,7 @@ int main(int argc, char **argv)
 {
 	int op, ret;

-	while ((op = getopt(argc, argv, "s:b:c:C:S:")) != -1) {
+	while ((op = getopt(argc, argv, "s:b:c:C:S:p:")) != -1) {
 		switch (op) {
 		case 's':
 			dst_addr = optarg;
@@ -612,6 +614,9 @@ int main(int argc, char **argv)
 		case 'S':
 			message_size = atoi(optarg);
 			break;
+		case 'p':
+			port_space = strtol(optarg, NULL, 0);
+			break;
 		default:
 			printf("usage: %s\n", argv[0]);
 			printf("\t[-s server_address]\n");
@@ -619,6 +624,7 @@ int main(int argc, char **argv)
 			printf("\t[-c connections]\n");
 			printf("\t[-C message_count]\n");
 			printf("\t[-S message_size]\n");
+			printf("\t[-p port space - %#x for UDP %#x for IPoIB]\n",RDMA_PS_UDP,RDMA_PS_IPOIB);
 			exit(1);
 		}
 	}
Index: librdmacm/include/rdma/rdma_cma.h
===================================================================
--- librdmacm.orig/include/rdma/rdma_cma.h	2007-01-22 21:56:13.000000000 +0200
+++ librdmacm/include/rdma/rdma_cma.h	2007-01-23 13:48:30.000000000 +0200
@@ -61,16 +61,11 @@ enum rdma_cm_event_type {
 };

 enum rdma_port_space {
+	RDMA_PS_IPOIB = 0x0002,
 	RDMA_PS_TCP  = 0x0106,
 	RDMA_PS_UDP  = 0x0111,
 };

-/*
- * Global qkey value for all UD QPs and multicast groups created via the
- * RDMA CM.
- */
-#define RDMA_UD_QKEY 0x01234567
-
 struct ib_addr {
 	union ibv_gid	sgid;
 	union ibv_gid	dgid;
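
As with the udaddy changes above, the remote qkey used for UD sends now comes from the
CM event rather than the removed RDMA_UD_QKEY constant. A sketch of such a send follows;
the helper name post_ud_send is hypothetical and error handling is omitted:

#include <string.h>
#include <infiniband/verbs.h>

static int post_ud_send(struct ibv_qp *qp, struct ibv_ah *ah,
			uint32_t remote_qpn, uint32_t remote_qkey,
			struct ibv_sge *sge)
{
	struct ibv_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof wr);
	wr.opcode = IBV_WR_SEND;
	wr.sg_list = sge;
	wr.num_sge = 1;
	wr.send_flags = IBV_SEND_SIGNALED;
	wr.wr.ud.ah = ah;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.wr.ud.remote_qkey = remote_qkey;	/* e.g. event->param.ud.qkey */

	return ibv_post_send(qp, &wr, &bad_wr);
}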
