[openib-general] [PATCH] CM: new call to return QP attributes for use modifying the QP

Sean Hefty mshefty at ichips.intel.com
Wed Jan 26 02:38:49 PST 2005


This patch implements the new CM call to return a default set of QP
attributes that can be used to call ib_modify_qp.  The code was
restructured in areas as a result of saving the necessary data.

I have nearly complete test code that will exercise the CM APIs
and can be used as sample code.  It runs in the kernel only, but shouldn't
be too difficult to port to user-mode.

Signed-off-by: Sean Hefty <sean.hefty at intel.com>

Index: include/ib_cm.h
===================================================================
--- include/ib_cm.h	(revision 1653)
+++ include/ib_cm.h	(working copy)
@@ -473,6 +473,26 @@
 		   u8 private_data_len);
 
 /**
+ * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
+ *   to a specified QP state.
+ * @cm_id: Communication identifier associated with the QP attributes to
+ *   initialize.
+ * @qp_attr: On input, specifies the desired QP state.  On output, the
+ *   mandatory and desired optional attributes will be set in order to
+ *   modify the QP to the specified state.
+ * @qp_attr_mask: The QP attribute mask that may be used to transition the
+ *   QP to the specified state.
+ *
+ * Users must set the @qp_attr->qp_state to the desired QP state.  This call
+ * will set all required attributes for the given transition, along with
+ * known optional attributes.  Users may override the attributes returned from
+ * this call before calling ib_modify_qp.
+ */
+int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
+		       struct ib_qp_attr *qp_attr,
+		       int *qp_attr_mask);
+
+/**
  * ib_send_cm_apr - Sends an alternate path response message in response to
  *   a load alternate path request.
  * @cm_id: Connection identifier associated with the alternate path response.
Index: core/cm.c
===================================================================
--- core/cm.c	(revision 1653)
+++ core/cm.c	(working copy)
@@ -87,6 +87,13 @@
 	struct ib_mad mad;
 };
 
+struct cm_av {
+	struct cm_port *port;
+	union ib_gid dgid;
+	struct ib_ah_attr ah_attr;
+	u16 pkey_index;
+};
+
 struct cm_id_private {
 	struct ib_cm_id	id;
 
@@ -97,20 +104,25 @@
 	wait_queue_head_t wait;
 	atomic_t refcount;
 
-	/* todo: use alternate port on send failure */
-	struct cm_port *port;
 	struct cm_msg *msg;
+	/* todo: use alternate port on send failure */
+	struct cm_av av;
+	struct cm_av alt_av;
 
-	struct ib_ah_attr ah_attr;
-	u16 pkey_index;
 	u32 local_qpn;
 	u32 remote_qpn;
+	u32 send_psn;
+	u32 recv_psn;
 	u64 remote_ca_guid;
-	union ib_gid remote_port_gid;
 	int timeout_ms;
 	u8 max_cm_retries;
 	u8 passive;
 	u8 peer_to_peer;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 local_ack_timeout;
+	u8 retry_count;
+	u8 rnr_retry_count;
 
 	struct list_head work_list;
 	atomic_t work_count;
@@ -143,9 +155,9 @@
 		return -ENOMEM;
 	memset(m, 0, sizeof *m);
 
-	mad_agent = cm_id_priv->port->mad_agent;
+	mad_agent = cm_id_priv->av.port->mad_agent;
 	m->send_wr.wr.ud.ah = ib_create_ah(mad_agent->qp->pd,
-					   &cm_id_priv->ah_attr);
+					   &cm_id_priv->av.ah_attr);
 	if (IS_ERR(m->send_wr.wr.ud.ah)) {
 		ret = PTR_ERR(m->send_wr.wr.ud.ah);
 		kfree(m);
@@ -157,7 +169,7 @@
 				     DMA_TO_DEVICE);
 	pci_unmap_addr_set(msg, mapping, m->sge.addr);
 	m->sge.length = sizeof m->mad;
-	m->sge.lkey = cm_id_priv->port->mr->lkey;
+	m->sge.lkey = cm_id_priv->av.port->mr->lkey;
 
 	m->send_wr.wr_id = (unsigned long) msg;
 	m->send_wr.sg_list = &m->sge;
@@ -168,7 +180,7 @@
 	m->send_wr.wr.ud.remote_qpn = 1;
 	m->send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
 	/* Timeout set by caller if response is expected. */
-	m->send_wr.wr.ud.pkey_index = cm_id_priv->pkey_index;
+	m->send_wr.wr.ud.pkey_index = cm_id_priv->av.pkey_index;
 
 	atomic_inc(&cm_id_priv->refcount);
 	m->cm_id_priv = cm_id_priv;
@@ -178,25 +190,16 @@
 
 static void cm_free_msg(struct cm_msg *msg)
 {
-	ib_destroy_ah(msg->send_wr.wr.ud.ah);
-	dma_unmap_single(msg->cm_id_priv->port->mad_agent->device->dma_device,
-			 pci_unmap_addr(msg, mapping), sizeof msg->mad,
-			 DMA_TO_DEVICE);
+	struct ib_device *device;
 
+	ib_destroy_ah(msg->send_wr.wr.ud.ah);
+	device = msg->cm_id_priv->av.port->mad_agent->device;
+	dma_unmap_single(device->dma_device, pci_unmap_addr(msg, mapping),
+			 sizeof msg->mad, DMA_TO_DEVICE);
 	cm_deref_id(msg->cm_id_priv);
 	kfree(msg);
 }
 
-static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
-			   u16 dlid, u8 sl, u16 src_path_bits)
-{
-	memset(ah_attr, 0, sizeof ah_attr);
-	ah_attr->dlid = be16_to_cpu(dlid);
-	ah_attr->sl = sl;
-	ah_attr->src_path_bits = src_path_bits;
-	ah_attr->port_num = port_num;
-}
-
 static struct cm_port * cm_find_port(struct ib_device *device,
 				     union ib_gid *gid)
 {
@@ -239,6 +242,36 @@
 	return 0;
 }
 
+static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
+			   u16 dlid, u8 sl, u16 src_path_bits)
+{
+	memset(ah_attr, 0, sizeof ah_attr);
+	ah_attr->dlid = be16_to_cpu(dlid);
+	ah_attr->sl = sl;
+	ah_attr->src_path_bits = src_path_bits;
+	ah_attr->port_num = port_num;
+}
+
+static int cm_init_av(struct ib_device *device, struct ib_sa_path_rec *path,
+		      struct cm_av *av)
+{
+	int ret;
+
+	av->port = cm_find_port(device, &path->sgid);
+	if (!av->port)
+		return -EINVAL;
+
+	ret = ib_find_cached_pkey(device, av->port->port_num, path->pkey,
+				  &av->pkey_index);
+	if (ret)
+		return ret;
+
+	av->dgid = path->dgid;
+	cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
+		       path->sl, path->slid & 0x7F);
+	return 0;
+}
+
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
@@ -388,7 +421,7 @@
 	struct rb_node **link = &cm.remote_qp_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
-	union ib_gid *port_gid = &cm_id_priv->remote_port_gid;
+	union ib_gid *port_gid = &cm_id_priv->dgid;
 	u32 remote_qpn = cm_id_priv->remote_qpn;
 
 	while (*link) {
@@ -401,7 +434,7 @@
 			link = &(*link)->rb_right;
 		else {
 			int cmp;
-			cmp = memcmp(port_gid, &cur_cm_id_priv->remote_port_gid,
+			cmp = memcmp(port_gid, &cur_cm_id_priv->dgid,
 				     sizeof *port_gid);
 			if (cmp < 0)
 				link = &(*link)->rb_left;
@@ -431,7 +464,7 @@
 			node = node->rb_right;
 		else {
 			int cmp;
-			cmp = memcmp(port_gid, &cm_id_priv->remote_port_gid,
+			cmp = memcmp(port_gid, &cm_id_priv->dgid,
 				     sizeof *port_gid);
 			if (cmp < 0)
 				node = node->rb_left;
@@ -451,7 +484,7 @@
 	struct rb_node **link = &cm.remote_sidr_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
-	union ib_gid *port_gid = &cm_id_priv->remote_port_gid;
+	union ib_gid *port_gid = &cm_id_priv->av.dgid;
 	u32 remote_id = cm_id_priv->id.remote_id;
 
 	while (*link) {
@@ -464,7 +497,7 @@
 			link = &(*link)->rb_right;
 		else {
 			int cmp;
-			cmp = memcmp(port_gid, &cur_cm_id_priv->remote_port_gid,
+			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
 				     sizeof *port_gid);
 			if (cmp < 0)
 				link = &(*link)->rb_left;
@@ -558,7 +591,7 @@
 	case IB_CM_SIDR_REQ_SENT:
 		cm_id->state = IB_CM_IDLE;
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		ib_cancel_mad(cm_id_priv->port->mad_agent,
+		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
 			      (unsigned long) cm_id_priv->msg);
 		break;
 	case IB_CM_SIDR_REQ_RCVD:
@@ -575,8 +608,8 @@
 	case IB_CM_MRA_REP_RCVD:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, 
-			       &cm_id_priv->port->ca_guid,
-			       sizeof &cm_id_priv->port->ca_guid, NULL, 0);
+			       &cm_id_priv->av.port->ca_guid,
+			       sizeof &cm_id_priv->av.port->ca_guid, NULL, 0);
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -653,7 +686,7 @@
 	hdr->method	   = IB_MGMT_METHOD_SEND;
 	hdr->attr_id	   = attr_id;
 
-	hi_tid   = ((u64) cm_id_priv->port->mad_agent->hi_tid) << 32;
+	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
 	low_tid  = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
 	hdr->tid = cpu_to_be64(hi_tid | low_tid);
 }
@@ -667,7 +700,7 @@
 
 	req_msg->local_comm_id = cm_id_priv->id.local_id;
 	req_msg->service_id = param->service_id;
-	req_msg->local_ca_guid = cm_id_priv->port->ca_guid;
+	req_msg->local_ca_guid = cm_id_priv->av.port->ca_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp->qp_num));
 	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
@@ -675,7 +708,7 @@
 				       param->remote_cm_response_timeout);
 	cm_req_set_qp_type(req_msg, param->qp->qp_type);
 	cm_req_set_flow_ctrl(req_msg, param->flow_control);
-	cm_req_set_starting_psn(req_msg, param->starting_send_psn);
+	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_send_psn));
 	cm_req_set_local_resp_timeout(req_msg,
 				      param->local_cm_response_timeout);
 	cm_req_set_retry_count(req_msg, param->retry_count);
@@ -731,6 +764,10 @@
 	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
 		return -EINVAL;
 
+	if (param->alternate_path &&
+	    param->alternate_path->pkey != param->primary_path->pkey)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -739,7 +776,7 @@
 {
 	struct cm_id_private *cm_id_priv;
 	struct ib_send_wr *bad_send_wr;
-	struct ib_sa_path_rec *path;
+	struct cm_req_msg *req_msg;
 	unsigned long flags;
 	int ret;
 
@@ -757,20 +794,16 @@
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	cm_id_priv->port = cm_find_port(param->qp->device,
-					&param->primary_path->sgid);
-	if (!cm_id_priv->port) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = ib_find_cached_pkey(param->qp->device,
-				  cm_id_priv->port->port_num,
-				  param->primary_path->pkey,
-				  &cm_id_priv->pkey_index);
+	ret = cm_init_av(param->qp->device, param->primary_path,
+			 &cm_id_priv->av);
 	if (ret)
 		goto out;
-
+	if (param->alternate_path) {
+		ret = cm_init_av(param->qp->device, param->alternate_path,
+				 &cm_id_priv->alt_av);
+		if (ret)
+			goto out;
+	}
 	cm_id->service_id = param->service_id;
 	cm_id->service_mask = ~0ULL;
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -778,19 +811,24 @@
 				 cm_convert_to_ms(
 				    param->remote_cm_response_timeout);
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
-	cm_id_priv->local_qpn = cpu_to_be32(param->qp->qp_num);
-	path = param->primary_path;
-	cm_set_ah_attr(&cm_id_priv->ah_attr, cm_id_priv->port->port_num,
-		       path->dlid, path->sl, path->slid & 0x7F);
+	cm_id_priv->initiator_depth = param->initiator_depth;
+	cm_id_priv->responder_resources = param->responder_resources;
+	cm_id_priv->retry_count = param->retry_count;
+
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
 	if (ret)
 		goto out;
 
-	cm_format_req((struct cm_req_msg *)&cm_id_priv->msg->mad,
-		      cm_id_priv, param);
+	req_msg = (struct cm_req_msg *)&cm_id_priv->msg->mad;
+	cm_format_req(req_msg, cm_id_priv, param);
 	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
 	cm_id_priv->msg->sent_state = IB_CM_REQ_SENT;
 
+	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
+	cm_id_priv->send_psn = cm_req_get_starting_psn(req_msg);
+	cm_id_priv->local_ack_timeout =
+				cm_req_get_primary_local_ack_timeout(req_msg);
+
 	/*
 	 * Received REQs won't match until we're in REQ_SENT state.  This
 	 * simplifies error recovery if the send fails.
@@ -801,7 +839,7 @@
 	}
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				&cm_id_priv->msg->send_wr, &bad_send_wr);
 
 	if (ret) {
@@ -828,9 +866,9 @@
 		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
 }
 
-static inline void cm_format_paths_from_req(struct ib_sa_path_rec *primary_path,
-					    struct ib_sa_path_rec *alt_path,
-					    struct cm_req_msg *req_msg)
+static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
+					    struct ib_sa_path_rec *primary_path,
+					    struct ib_sa_path_rec *alt_path)
 {
 	memset(primary_path, 0, sizeof *primary_path);
 	primary_path->dgid = req_msg->primary_local_gid;
@@ -851,7 +889,7 @@
 	primary_path->packet_life_time =
 		cm_req_get_primary_local_ack_timeout(req_msg);
 
-	if (alt_path) {
+	if (req_msg->alt_local_lid) {
 		memset(alt_path, 0, sizeof *alt_path);
 		alt_path->dgid = req_msg->alt_local_gid;
 		alt_path->sgid = req_msg->alt_remote_gid;
@@ -874,6 +912,7 @@
 }
 
 static void cm_format_req_event(struct cm_work *work,
+				struct cm_id_private *cm_id_priv,
 				struct ib_cm_id *listen_id)
 {
 	struct cm_req_msg *req_msg;
@@ -883,22 +922,20 @@
 	work->cm_event.event = IB_CM_REQ_RECEIVED;
 	param = &work->cm_event.param.req_rcvd;
 	param->listen_id = listen_id;
-	param->device = work->port->mad_agent->device;
-	param->port = work->port->port_num;
+	param->device = cm_id_priv->av.port->mad_agent->device;
+	param->port = cm_id_priv->av.port->port_num;
 	param->primary_path = &work->path[0];
 	if (req_msg->alt_local_lid)
 		param->alternate_path = &work->path[1];
 	else
 		param->alternate_path = NULL;
-	cm_format_paths_from_req(param->primary_path, param->alternate_path,
-				 req_msg);
 	param->remote_ca_guid = req_msg->local_ca_guid;
 	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
 	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 	param->qp_type = cm_req_get_qp_type(req_msg);
 	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
-	param->responder_resources = cm_req_get_resp_res(req_msg);
-	param->initiator_depth = cm_req_get_init_depth(req_msg);
+	param->responder_resources = cm_req_get_init_depth(req_msg);
+	param->initiator_depth = cm_req_get_resp_res(req_msg);
 	param->local_cm_response_timeout = 
 					cm_req_get_remote_resp_timeout(req_msg);
 	param->flow_control = cm_req_get_flow_ctrl(req_msg);
@@ -934,93 +971,122 @@
 		ib_destroy_cm_id(&cm_id_priv->id);	
 }
 
+static int cm_peer_req_handler(struct cm_work *work)
+{
+	/* todo: fix peer-to-peer
+	spin_lock_irqsave(&cm.lock, flags);
+	if (cm_is_active_peer(work->port->ca_guid,
+				req_msg->local_ca_guid,
+				cur_cm_id_priv->local_qpn,
+				cm_req_get_local_qpn(req_msg))) {
+		spin_unlock_irqrestore(&cm.lock, flags);
+		ret = 1;
+		goto out;
+	}
+	atomic_inc(&cur_cm_id_priv->refcount);
+	cur_cm_id_priv->id.state = IB_CM_REQ_RCVD;
+	rb_erase(&cur_cm_id_priv->service_node, &cm.service_table);
+	cur_cm_id_priv->peer_to_peer = 0;
+	cur_cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
+	cur_cm_id_priv->id.remote_id = req_msg->local_comm_id;
+	cm_insert_remote_id(cur_cm_id_priv);
+	spin_unlock_irqrestore(&cm.lock, flags);
+
+	ib_cancel_mad(work->port->mad_agent,
+		(unsigned long) cur_cm_id_priv->msg);
+	ib_destroy_cm_id(&cm_id_priv->id);
+	cm_id_priv = cur_cm_id_priv;
+out:
+	return ret;
+	*/
+	return -ECONNREFUSED;	/* Does not match a peer-to-peer request */
+}
+
 static int cm_req_handler(struct cm_work *work)
 {
 	struct ib_cm_id *cm_id;
-	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
+	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
 	struct cm_req_msg *req_msg;
 	unsigned long flags;
-	struct ib_wc *wc;
+	int ret;
+
+	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
+
+	/* Check for duplicate REQ. */
+	spin_lock_irqsave(&cm.lock, flags);
+	cm_id_priv = cm_find_id_by_remote_id(req_msg->local_ca_guid,
+					     req_msg->local_comm_id);
+	spin_unlock_irqrestore(&cm.lock, flags);
+	if (cm_id_priv)
+		return -EINVAL;
+
+	/* Check for peer-to-peer connection. */
+	ret = cm_peer_req_handler(work);
+	if (ret != -ECONNREFUSED)
+		return ret;
 
 	cm_id = ib_create_cm_id(NULL, NULL);
 	if (IS_ERR(cm_id))
 		return PTR_ERR(cm_id);
+	
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-
-	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
 	cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
 	cm_id_priv->id.remote_id = req_msg->local_comm_id;
 	cm_id_priv->passive = 1;
 
+	/* Find matching listen request. */
 	spin_lock_irqsave(&cm.lock, flags);
-	/* Check for duplicate REQ. */
-	cur_cm_id_priv = cm_find_id_by_remote_id(req_msg->local_ca_guid,
-						 req_msg->local_comm_id);
-	if (cur_cm_id_priv) {
+	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
+	if (!listen_cm_id_priv) {
 		spin_unlock_irqrestore(&cm.lock, flags);
-		goto out; /* Duplicate message. */
+		/* todo: reject with no match */
+		ret = -EINVAL;
+		goto out;
 	}
-	/* Find matching listen request. */
-	cur_cm_id_priv = cm_find_listen(req_msg->service_id);
-	if (cur_cm_id_priv) {
-		atomic_inc(&cur_cm_id_priv->refcount);
-		atomic_inc(&cm_id_priv->refcount);
-		cm_id_priv->id.state = IB_CM_REQ_RCVD;
-		atomic_inc(&cm_id_priv->work_count);
-		cm_insert_remote_id(cm_id_priv);
-		spin_unlock_irqrestore(&cm.lock, flags);
+	atomic_inc(&listen_cm_id_priv->refcount);
+	atomic_inc(&cm_id_priv->refcount);
+	cm_id_priv->id.state = IB_CM_REQ_RCVD;
+	atomic_inc(&cm_id_priv->work_count);
+	cm_insert_remote_id(cm_id_priv);
+	spin_unlock_irqrestore(&cm.lock, flags);
 
-		cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
-		cm_id_priv->id.context = cur_cm_id_priv->id.context;
-		cm_id_priv->id.service_id = req_msg->service_id;
-		cm_id_priv->id.service_mask = ~0ULL;
-	} else {
-		/* Search for a peer request. */
-                /* todo: fix peer-to-peer */
-		if (!cur_cm_id_priv) {
-			spin_unlock_irqrestore(&cm.lock, flags);
-			/* todo: reject with no match */
-			goto out;
-		}
-		if (cm_is_active_peer(work->port->ca_guid,
-				      req_msg->local_ca_guid,
-				      cur_cm_id_priv->local_qpn,
-				      cm_req_get_local_qpn(req_msg))) {
-			spin_unlock_irqrestore(&cm.lock, flags);
-			goto out; /* Stay active. */
-		}
-		atomic_inc(&cur_cm_id_priv->refcount);
-		cur_cm_id_priv->id.state = IB_CM_REQ_RCVD;
-		/*rb_erase(&cur_cm_id_priv->service_node, &cm.service_table);*/
-		cur_cm_id_priv->peer_to_peer = 0;
-		cur_cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
-		cur_cm_id_priv->id.remote_id = req_msg->local_comm_id;
-		cm_insert_remote_id(cur_cm_id_priv);
-		spin_unlock_irqrestore(&cm.lock, flags);
+	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+	cm_id_priv->id.context = listen_cm_id_priv->id.context;
+	cm_id_priv->id.service_id = req_msg->service_id;
+	cm_id_priv->id.service_mask = ~0ULL;
 
-		ib_cancel_mad(work->port->mad_agent,
-			(unsigned long) cur_cm_id_priv->msg);
-		ib_destroy_cm_id(&cm_id_priv->id);
-		cm_id_priv = cur_cm_id_priv;
+	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+	ret = cm_init_av(work->port->mad_agent->device, &work->path[0],
+			 &cm_id_priv->av);
+	if (ret)
+		goto bad_req;
+	if (req_msg->alt_local_lid) {
+		ret = cm_init_av(work->port->mad_agent->device, &work->path[1],
+				 &cm_id_priv->alt_av);
+		if (ret)
+			goto bad_req;
 	}
-	cm_id_priv->port = work->port;
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 					cm_req_get_local_resp_timeout(req_msg));
 	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
 	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
-	cm_id_priv->remote_port_gid = req_msg->primary_local_gid;
-	wc = work->mad_recv_wc->wc;
-	cm_id_priv->pkey_index = wc->pkey_index;
-	cm_set_ah_attr(&cm_id_priv->ah_attr, work->port->port_num,
-		       wc->slid, wc->sl, wc->dlid_path_bits);
+	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
+	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
+	cm_id_priv->recv_psn = cm_req_get_starting_psn(req_msg);
+	cm_id_priv->local_ack_timeout =
+				cm_req_get_primary_local_ack_timeout(req_msg);
+	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
+	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 
-	cm_format_req_event(work, &cur_cm_id_priv->id);
+	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
-	cm_deref_id(cur_cm_id_priv);
+	cm_deref_id(listen_cm_id_priv);
 	return 0;
+bad_req:
+	cm_deref_id(listen_cm_id_priv);
 out:
 	ib_destroy_cm_id(&cm_id_priv->id);
-	return -EINVAL;
+	return ret;
 }
 
 static void cm_format_rep(struct cm_rep_msg *rep_msg,
@@ -1041,7 +1107,7 @@
 	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
 	cm_rep_set_srq(rep_msg, (param->qp->srq != NULL));
-	rep_msg->local_ca_guid = cm_id_priv->port->ca_guid;
+	rep_msg->local_ca_guid = cm_id_priv->av.port->ca_guid;
 
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
@@ -1068,6 +1134,7 @@
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_msg *msg;
+	struct cm_rep_msg *rep_msg;
 	struct ib_send_wr *bad_send_wr;
 	unsigned long flags;
 	int ret;
@@ -1081,14 +1148,15 @@
 	if (ret)
 		goto out;
 
-	cm_format_rep((struct cm_rep_msg *)&msg->mad, cm_id_priv, param);
+	rep_msg = (struct cm_rep_msg *)&msg->mad;
+	cm_format_rep(rep_msg, cm_id_priv, param);
 	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
 	msg->sent_state = IB_CM_REP_SENT;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state == IB_CM_REQ_RCVD ||
 	    cm_id->state == IB_CM_MRA_REQ_SENT)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -1101,6 +1169,10 @@
 
 	cm_id->state = IB_CM_REP_SENT;
 	cm_id_priv->msg = msg;
+	cm_id_priv->initiator_depth = param->initiator_depth;
+	cm_id_priv->responder_resources = param->responder_resources;
+	cm_id_priv->send_psn = cpu_to_be32(param->starting_send_psn &
+					   0x00FFFFFF);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 out:
 	return ret;
@@ -1133,7 +1205,7 @@
 		return;
 
 	cm_format_rtu((struct cm_rtu_msg *)&msg->mad, cm_id_priv, NULL, 0);
-	ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 			       &msg->send_wr, &bad_send_wr);
 	if (ret)
 		cm_free_msg(cm_id_priv->msg);
@@ -1163,7 +1235,7 @@
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state == IB_CM_REP_RCVD ||
 	    cm_id->state == IB_CM_MRA_REP_SENT)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -1193,8 +1265,8 @@
 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
 	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
-	param->responder_resources = rep_msg->resp_resources;
-	param->initiator_depth = rep_msg->initiator_depth;
+	param->responder_resources = rep_msg->initiator_depth;
+	param->initiator_depth = rep_msg->resp_resources;
 	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
 	param->failover_accepted = cm_rep_get_failover(rep_msg);
 	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
@@ -1235,6 +1307,10 @@
 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
 	cm_id_priv->remote_ca_guid = rep_msg->local_ca_guid;
 	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->initiator_depth = rep_msg->resp_resources;
+	cm_id_priv->responder_resources = rep_msg->initiator_depth;
+	cm_id_priv->recv_psn = cm_rep_get_starting_psn(rep_msg);
+	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
 
 	/* todo: handle peer_to_peer
 	if (cm_id_priv->peer_to_peer) {
@@ -1251,7 +1327,7 @@
 		list_add_tail(&work->list, &cm_id_priv->work_list);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+	ib_cancel_mad(cm_id_priv->av.port->mad_agent, wr_id);
 	if (ret)
 		cm_process_work(cm_id_priv, work);
 	else
@@ -1292,7 +1368,7 @@
 		list_add_tail(&work->list, &cm_id_priv->work_list);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+	ib_cancel_mad(cm_id_priv->av.port->mad_agent, wr_id);
 	if (ret)
 		cm_process_work(cm_id_priv, work);
 	else
@@ -1375,7 +1451,7 @@
 		goto out;
 	}
 	ret = msg_ret ? msg_ret :
-		ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				 &msg->send_wr, &bad_send_wr);
 	if (!ret) {
 		cm_id->state = IB_CM_DREQ_SENT;
@@ -1419,7 +1495,7 @@
 		return;
 
 	cm_format_drep((struct cm_drep_msg *)&msg->mad, cm_id_priv, NULL, 0);
-	ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 			       &msg->send_wr, &bad_send_wr);
 	if (ret)
 		cm_free_msg(cm_id_priv->msg);
@@ -1451,7 +1527,7 @@
 		goto out;
 	}
 	ret = msg_ret ? msg_ret :
-		ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				 &msg->send_wr, &bad_send_wr);
 
 	cm_id->state = IB_CM_TIMEWAIT;
@@ -1546,7 +1622,7 @@
 		list_add_tail(&work->list, &cm_id_priv->work_list);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+	ib_cancel_mad(cm_id_priv->av.port->mad_agent, wr_id);
 	if (cm_id_priv->passive)
 		cm_remove_remote_id(cm_id_priv);
 
@@ -1645,7 +1721,7 @@
 		cm_remove_remote_id(cm_id_priv);
 
 	ret = msg_ret ? msg_ret :
-		ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				 &msg->send_wr, &bad_send_wr);
 out:
 	if (!msg_ret && ret)
@@ -1729,7 +1805,7 @@
 	    (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
 	     cm_id_priv->id.lap_state != IB_CM_LAP_RCVD)) {
 
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	} else {
 		ret = -EINVAL;
@@ -1826,7 +1902,7 @@
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state == IB_CM_ESTABLISHED &&
 	    cm_id->lap_state == IB_CM_LAP_IDLE)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -1962,7 +2038,7 @@
 	if (cm_id->state == IB_CM_ESTABLISHED &&
 	    (cm_id->lap_state == IB_CM_LAP_RCVD ||
 	     cm_id->lap_state == IB_CM_MRA_LAP_SENT))
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -2014,7 +2090,7 @@
 		list_add_tail(&work->list, &cm_id_priv->work_list);
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+	ib_cancel_mad(cm_id_priv->av.port->mad_agent, wr_id);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2057,12 +2133,11 @@
 		return -EINVAL;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	ret = cm_find_device(&param->path->sgid, &device, &cm_id_priv->port);
+	ret = cm_find_device(&param->path->sgid, &device, &cm_id_priv->av.port);
 	if (ret)
 		goto out;
 
-	ret = ib_find_cached_pkey(device, cm_id_priv->port->port_num,
-				  param->path->pkey, &cm_id_priv->pkey_index);
+	ret = cm_init_av(device, param->path, &cm_id_priv->av);
 	if (ret)
 		goto out;
 
@@ -2070,9 +2145,6 @@
 	cm_id->service_mask = ~0ULL;
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
-	cm_set_ah_attr(&cm_id_priv->ah_attr, cm_id_priv->port->port_num,
-		       param->path->dlid, param->path->sl,
-		       param->path->slid & 0x7F);
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2084,7 +2156,7 @@
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state == IB_CM_IDLE)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -2136,8 +2208,8 @@
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	wc = work->mad_recv_wc->wc;
-	cm_id_priv->remote_port_gid.global.subnet_prefix = wc->slid;
-	cm_id_priv->remote_port_gid.global.interface_id = 0;
+	cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
+	cm_id_priv->av.dgid.global.interface_id = 0;
 	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
 	atomic_inc(&cm_id_priv->work_count);
@@ -2162,9 +2234,9 @@
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
 	cm_id_priv->id.service_mask = ~0ULL;
-	cm_id_priv->port = work->port;
-	cm_id_priv->pkey_index = wc->pkey_index;
-	cm_set_ah_attr(&cm_id_priv->ah_attr, work->port->port_num,
+	cm_id_priv->av.port = work->port;
+	cm_id_priv->av.pkey_index = wc->pkey_index;
+	cm_set_ah_attr(&cm_id_priv->av.ah_attr, work->port->port_num,
 		       wc->slid, wc->sl, wc->dlid_path_bits);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
@@ -2221,7 +2293,7 @@
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state == IB_CM_SIDR_REQ_RCVD)
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 	else
 		ret = -EINVAL;
@@ -2279,7 +2351,7 @@
 	cm_id_priv->id.state = IB_CM_IDLE;
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ib_cancel_mad(cm_id_priv->port->mad_agent,
+	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
 		      (unsigned long) cm_id_priv->msg);
 	cm_format_sidr_rep_event(work);
 	cm_process_work(cm_id_priv, work);
@@ -2356,7 +2428,7 @@
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
 	if (msg->retry++ < cm_id_priv->max_cm_retries) {
-		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
 				       &msg->send_wr, &bad_send_wr);
 		if (ret)
 			cm_process_send_error(msg, IB_WC_GENERAL_ERR);
@@ -2464,6 +2536,142 @@
 	queue_work(cm.wq, &work->work);
 }
 
+static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
+				struct ib_qp_attr *qp_attr,
+				int *qp_attr_mask)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	switch (cm_id_priv->id.state) {
+	case IB_CM_REQ_SENT:
+	case IB_CM_MRA_REQ_RCVD:
+	case IB_CM_REQ_RCVD:
+	case IB_CM_MRA_REQ_SENT:
+	case IB_CM_REP_RCVD:
+	case IB_CM_MRA_REP_SENT:
+	case IB_CM_REP_SENT:
+	case IB_CM_MRA_REP_RCVD:
+	case IB_CM_ESTABLISHED:
+		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
+				IB_QP_PKEY_INDEX | IB_QP_PORT;
+		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+		if (cm_id_priv->responder_resources)
+			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
+						    IB_ACCESS_REMOTE_READ;
+		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
+		qp_attr->port_num = cm_id_priv->av.port->port_num;
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	return ret;
+}
+
+static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
+			       struct ib_qp_attr *qp_attr,
+			       int *qp_attr_mask)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	switch (cm_id_priv->id.state) {
+	case IB_CM_REQ_RCVD:
+	case IB_CM_MRA_REQ_SENT:
+	case IB_CM_REP_RCVD:
+	case IB_CM_MRA_REP_SENT:
+	case IB_CM_REP_SENT:
+	case IB_CM_MRA_REP_RCVD:
+	case IB_CM_ESTABLISHED:
+		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_DEST_QPN |
+				IB_QP_RQ_PSN | IB_QP_MAX_QP_RD_ATOMIC |
+				IB_QP_MIN_RNR_TIMER;
+		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
+		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->recv_psn);
+		qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
+		qp_attr->min_rnr_timer = 0;
+		if (cm_id_priv->alt_av.ah_attr.dlid) {
+			*qp_attr_mask |= IB_QP_ALT_PATH;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
+		}
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	return ret;
+}
+
+static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
+			       struct ib_qp_attr *qp_attr,
+			       int *qp_attr_mask)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	switch (cm_id_priv->id.state) {
+	case IB_CM_REP_RCVD:
+	case IB_CM_MRA_REP_SENT:
+	case IB_CM_REP_SENT:
+	case IB_CM_MRA_REP_RCVD:
+	case IB_CM_ESTABLISHED:
+		*qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+				IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
+				IB_QP_MAX_DEST_RD_ATOMIC;
+		qp_attr->timeout = cm_id_priv->local_ack_timeout;
+		qp_attr->retry_cnt = cm_id_priv->retry_count;
+		qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+		qp_attr->sq_psn = cm_id_priv->send_psn;
+		qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
+		if (cm_id_priv->alt_av.ah_attr.dlid) {
+			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+			qp_attr->path_mig_state = IB_MIG_ARMED;
+		}
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	return ret;
+}
+
+int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
+		       struct ib_qp_attr *qp_attr,
+		       int *qp_attr_mask)
+{
+	struct cm_id_private *cm_id_priv;
+	int ret;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	switch (qp_attr->qp_state) {
+	case IB_QPS_INIT:
+		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
+		break;
+	case IB_QPS_RTR:
+		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
+		break;
+	case IB_QPS_RTS:
+		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_init_qp_attr);
+
 static u64 cm_get_ca_guid(struct ib_device *device)
 {
 	struct ib_device_attr *device_attr;



More information about the general mailing list