[openib-general] [PATCH] RDMA / IB CM: support immediately sending replies to received messages

Sean Hefty sean.hefty at intel.com
Fri Aug 4 16:55:16 PDT 2006


This is what a patch to support sending replies immediately after
polling a receive, before the QP has "finished" connecting, would
look like.  The changes require that the ib_cm support returning
QP attributes for the RTS transition after a REQ has been received,
and they add an rdma_establish() call to the RDMA CM.
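
As a rough sketch of the intended usage (names such as reply_wr and
bad_wr and the surrounding completion handling are placeholders, and
error handling is elided), a passive-side consumer could reply as
soon as a receive completes, before the RTU has arrived:

	/* Passive side, after rdma_accept() has returned.  With this
	 * patch the QP is already in RTS, so the reply can be posted
	 * immediately upon polling the receive. */
	struct ib_send_wr reply_wr, *bad_wr;	/* setup omitted */
	struct ib_wc wc;
	int ret;

	if (ib_poll_cq(id->qp->recv_cq, 1, &wc) == 1 &&
	    wc.status == IB_WC_SUCCESS) {
		ret = ib_post_send(id->qp, &reply_wr, &bad_wr);
		if (!ret)
			/* tell the CM the connection is usable now */
			ret = rdma_establish(id);
	}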

I'd like to decide on which approach to use by the end of next
week, so I can commit any changes and update userspace accordingly.
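
For ib_cm consumers that manage their own QP, the cm.c change below
lets the RTS attributes be queried as soon as the REQ has been
received, before the REP goes out.  A minimal sketch (assuming cm_id
is in the IB_CM_REQ_RCVD state and the QP has already been moved to
RTR; error handling elided):

	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	/* With this patch, legal while in IB_CM_REQ_RCVD, i.e. before
	 * the REP has been sent. */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);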

Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
Index: include/rdma/rdma_cm.h
===================================================================
--- include/rdma/rdma_cm.h	(revision 8822)
+++ include/rdma/rdma_cm.h	(working copy)
@@ -256,6 +256,16 @@ int rdma_listen(struct rdma_cm_id *id, i
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 /**
+ * rdma_establish - Forces a connection state to established.
+ * @id: Connection identifier to transition to established.
+ *
+ * This routine should be invoked by users who receive messages on a
+ * QP before being notified by the RDMA CM that the connection has been
+ * established.
+ */
+int rdma_establish(struct rdma_cm_id *id);
+
+/**
  * rdma_reject - Called to reject a connection request or response.
  */
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
Index: core/cm.c
===================================================================
--- core/cm.c	(revision 8823)
+++ core/cm.c	(working copy)
@@ -3207,6 +3207,10 @@ static int cm_init_qp_rts_attr(struct cm
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch (cm_id_priv->id.state) {
+	/* Allow transition to RTS before sending REP */
+	case IB_CM_REQ_RCVD:
+	case IB_CM_MRA_REQ_SENT:
+
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
 	case IB_CM_REP_SENT:
Index: core/cma.c
===================================================================
--- core/cma.c	(revision 8822)
+++ core/cma.c	(working copy)
@@ -840,22 +840,6 @@ static int cma_verify_rep(struct rdma_id
 	return 0;
 }
 
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
-{
-	int ret;
-
-	ret = cma_modify_qp_rts(&id_priv->id);
-	if (ret)
-		goto reject;
-
-	return 0;
-reject:
-	cma_modify_qp_err(&id_priv->id);
-	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
-		       NULL, 0, NULL, 0);
-	return ret;
-}
-
 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv = cm_id->context;
@@ -886,9 +870,8 @@ static int cma_ib_handler(struct ib_cm_i
 		private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
 		break;
 	case IB_CM_RTU_RECEIVED:
-		status = cma_rtu_recv(id_priv);
-		event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-				 RDMA_CM_EVENT_ESTABLISHED;
+	case IB_CM_USER_ESTABLISHED:
+		event = RDMA_CM_EVENT_ESTABLISHED;
 		break;
 	case IB_CM_DREQ_ERROR:
 		status = -ETIMEDOUT; /* fall through */
@@ -1981,11 +1964,25 @@ static int cma_accept_ib(struct rdma_id_
 			 struct rdma_conn_param *conn_param)
 {
 	struct ib_cm_rep_param rep;
-	int ret;
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask, ret;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
-	if (ret)
-		return ret;
+	if (id_priv->id.qp) {
+		ret = cma_modify_qp_rtr(&id_priv->id);
+		if (ret)
+			goto out;
+
+		qp_attr.qp_state = IB_QPS_RTS;
+		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+					 &qp_attr_mask);
+		if (ret)
+			goto out;
+
+		qp_attr.max_rd_atomic = conn_param->initiator_depth;
+		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+		if (ret)
+			goto out;
+	}
 
 	memset(&rep, 0, sizeof rep);
 	rep.qp_num = id_priv->qp_num;
@@ -2000,7 +1997,9 @@ static int cma_accept_ib(struct rdma_id_
 	rep.rnr_retry_count = conn_param->rnr_retry_count;
 	rep.srq = id_priv->srq ? 1 : 0;
 
-	return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+	return ret;
 }
 
 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
@@ -2058,6 +2057,27 @@ reject:
 }
 EXPORT_SYMBOL(rdma_accept);
 
+int rdma_establish(struct rdma_cm_id *id)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!cma_comp(id_priv, CMA_CONNECT))
+		return -EINVAL;
+
+	switch (rdma_node_get_transport(id->device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		ret = ib_cm_establish(id_priv->cm_id.ib);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rdma_establish);
+
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 		u8 private_data_len)
 {
