[openib-general] [RFC] [PATCH] RDMA CM: add UD QP support
Sean Hefty
sean.hefty at intel.com
Thu May 18 13:06:21 PDT 2006
Add base support for UD QPs to the RDMA CM. This allows users of
UD QPs access to the CMA's address translation services.
From a usage model, UD QP support is provided through the UDP port space.
Client calls are essentially the same as that used to establish a
connection. That is, a client calls: resolve_addr, resolve_route, and
connect. A server calls: listen and accept. Connect and accept
correspond to SIDR REQ / SIDR REP, respectively.
This patch introduces a new protocol for SIDR that is the same as that
used by the CMA for connection REQs.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
Index: include/rdma/rdma_cm.h
===================================================================
--- include/rdma/rdma_cm.h (revision 6993)
+++ include/rdma/rdma_cm.h (working copy)
@@ -212,9 +212,15 @@ struct rdma_conn_param {
/**
* rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
*
* Users must have resolved a route for the rdma_cm_id to connect with
* by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's. The actual operation is
+ * based on the rdma_cm_id's port space.
*/
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
@@ -253,4 +259,3 @@ int rdma_reject(struct rdma_cm_id *id, c
int rdma_disconnect(struct rdma_cm_id *id);
#endif /* RDMA_CM_H */
-
Index: include/rdma/rdma_user_cm.h
===================================================================
--- include/rdma/rdma_user_cm.h (revision 6949)
+++ include/rdma/rdma_user_cm.h (working copy)
@@ -38,7 +38,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_sa.h>
-#define RDMA_USER_CM_ABI_VERSION 1
+#define RDMA_USER_CM_ABI_VERSION 2
#define RDMA_MAX_PRIVATE_DATA 256
@@ -72,6 +72,8 @@ struct rdma_ucm_cmd_hdr {
struct rdma_ucm_create_id {
__u64 uid;
__u64 response;
+ __u16 ps;
+ __u8 reserved[6];
};
struct rdma_ucm_create_id_resp {
Index: core/cma.c
===================================================================
--- core/cma.c (revision 7339)
+++ core/cma.c (working copy)
@@ -66,6 +66,7 @@ static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
struct cma_device {
struct list_head list;
@@ -491,9 +492,17 @@ static inline int cma_any_addr(struct so
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+ if (addr->sa_family == AF_INET)
+ return ((struct sockaddr_in *) addr)->sin_port;
+ else
+ return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
static inline int cma_any_port(struct sockaddr *addr)
{
- return !((struct sockaddr_in *) addr)->sin_port;
+ return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -833,8 +842,8 @@ out:
return ret;
}
-static struct rdma_id_private* cma_new_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+static struct rdma_id_private* cma_new_conn_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
@@ -877,6 +886,42 @@ err:
return NULL;
}
+static struct rdma_id_private* cma_new_udp_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv;
+ struct rdma_cm_id *id;
+ union cma_ip_addr *src, *dst;
+ __u16 port;
+ u8 ip_ver;
+ int ret;
+
+ id = rdma_create_id(listen_id->event_handler, listen_id->context,
+ listen_id->ps);
+ if (IS_ERR(id))
+ return NULL;
+
+
+ if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+ &ip_ver, &port, &src, &dst))
+ goto err;
+
+ cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+ ip_ver, port, src, dst);
+
+ ret = rdma_translate_ip(&id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->state = CMA_CONNECT;
+ return id_priv;
+err:
+ rdma_destroy_id(id);
+ return NULL;
+}
+
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
@@ -889,7 +934,10 @@ static int cma_req_handler(struct ib_cm_
goto out;
}
- conn_id = cma_new_id(&listen_id->id, ib_event);
+ if (listen_id->id.ps == RDMA_PS_UDP)
+ conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+ else
+ conn_id = cma_new_conn_id(&listen_id->id, ib_event);
if (!conn_id) {
ret = -ENOMEM;
goto out;
@@ -926,8 +974,7 @@ out:
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
- return cpu_to_be64(((u64)ps << 16) +
- be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+ return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1508,6 +1555,9 @@ static int cma_get_port(struct rdma_id_p
case RDMA_PS_TCP:
ps = &tcp_ps;
break;
+ case RDMA_PS_UDP:
+ ps = &udp_ps;
+ break;
default:
return -EPROTONOSUPPORT;
}
@@ -1586,6 +1636,94 @@ static int cma_format_hdr(void *hdr, enu
return 0;
}
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv = cm_id->context;
+ enum rdma_cm_event_type event;
+ struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+ struct rdma_route *route;
+ int ret, status;
+
+ if (!cma_comp(id_priv, CMA_CONNECT))
+ return 0;
+
+ atomic_inc(&id_priv->dev_remove);
+ switch (ib_event->event) {
+ case IB_CM_SIDR_REQ_ERROR:
+ event = RDMA_CM_EVENT_UNREACHABLE;
+ status = -ETIMEDOUT;
+ break;
+ case IB_CM_SIDR_REP_RECEIVED:
+ if (rep->status != IB_SIDR_SUCCESS) {
+ event = RDMA_CM_EVENT_UNREACHABLE;
+ status = ib_event->param.sidr_rep_rcvd.status;
+ break;
+ }
+ route = &id_priv->id.route;
+ if (rep->qkey != ntohs(cma_port(&route->addr.dst_addr))) {
+ event = RDMA_CM_EVENT_UNREACHABLE;
+ status = -EINVAL;
+ break;
+ }
+ event = RDMA_CM_EVENT_ESTABLISHED;
+ status = 0;
+ break;
+ default:
+ printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+ ib_event->event);
+ ret = 0;
+ goto out;
+ }
+
+ ret = cma_notify_user(id_priv, event, status, NULL, 0);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.ib = NULL;
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return ret;
+ }
+out:
+ cma_release_remove(id_priv);
+ return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv)
+{
+ struct ib_cm_sidr_req_param req;
+ struct rdma_route *route;
+ struct cma_hdr hdr;
+ int ret;
+
+ id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+ cma_sidr_rep_handler, id_priv);
+ if (IS_ERR(id_priv->cm_id.ib))
+ return PTR_ERR(id_priv->cm_id.ib);
+
+ route = &id_priv->id.route;
+ ret = cma_format_hdr(&hdr, id_priv->id.ps, route);
+ if (ret)
+ goto out;
+
+ req.path = route->path_rec;
+ req.service_id = cma_get_service_id(id_priv->id.ps,
+ &route->addr.dst_addr);
+ req.timeout_ms = CMA_CM_RESPONSE_TIMEOUT;
+ req.private_data = &hdr;
+ req.private_data_len = sizeof hdr;
+ req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+ ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+out:
+ if (ret) {
+ ib_destroy_cm_id(id_priv->cm_id.ib);
+ id_priv->cm_id.ib = NULL;
+ }
+ return ret;
+}
+
static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -1660,7 +1798,10 @@ int rdma_connect(struct rdma_cm_id *id,
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_connect_ib(id_priv, conn_param);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_resolve_ib_udp(id_priv);
+ else
+ ret = cma_connect_ib(id_priv, conn_param);
break;
default:
ret = -ENOSYS;
@@ -1702,6 +1843,21 @@ static int cma_accept_ib(struct rdma_id_
return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ enum ib_cm_sidr_status status)
+{
+ struct ib_cm_sidr_rep_param rep;
+
+ memset(&rep, 0, sizeof rep);
+ rep.status = status;
+ if (status == IB_SIDR_SUCCESS) {
+ rep.qp_num = id_priv->qp_num;
+ rep.qkey = ntohs(cma_port(&id_priv->id.route.addr.src_addr));
+ }
+
+ return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
@@ -1719,7 +1875,9 @@ int rdma_accept(struct rdma_cm_id *id, s
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (conn_param)
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS);
+ else if (conn_param)
ret = cma_accept_ib(id_priv, conn_param);
else
ret = cma_rep_recv(id_priv);
@@ -1752,9 +1910,12 @@ int rdma_reject(struct rdma_cm_id *id, c
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- private_data, private_data_len);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT);
+ else
+ ret = ib_send_cm_rej(id_priv->cm_id.ib,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, private_data, private_data_len);
break;
default:
ret = -ENOSYS;
@@ -1916,6 +2077,7 @@ static void cma_cleanup(void)
destroy_workqueue(cma_wq);
idr_destroy(&sdp_ps);
idr_destroy(&tcp_ps);
+ idr_destroy(&udp_ps);
}
module_init(cma_init);
Index: core/ucma.c
===================================================================
--- core/ucma.c (revision 7119)
+++ core/ucma.c (working copy)
@@ -291,7 +291,7 @@ static ssize_t ucma_create_id(struct ucm
return -ENOMEM;
ctx->uid = cmd.uid;
- ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, RDMA_PS_TCP);
+ ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
if (IS_ERR(ctx->cm_id)) {
ret = PTR_ERR(ctx->cm_id);
goto err1;
More information about the general
mailing list