[openib-general] [PATCH] [RFC] intermediate CM development

Sean Hefty mshefty at ichips.intel.com
Mon Jan 10 18:06:05 PST 2005


The following adds a substantial amount of framework to the CM, but doesn't
actually increase its functionality.  (There's code to send a REQ message
and time it out, but I haven't tested this.)  Posting it for feedback before all
of the state transitions are in place.

If there are no substantial objections to the implementation, the CM should
be ready for testing near the end of the week.

- Sean

Index: core/cm.c
===================================================================
--- core/cm.c	(revision 1505)
+++ core/cm.c	(working copy)
@@ -33,11 +33,15 @@
  *
  * $Id$
  */
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/pci.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 
+#include <ib_cache.h>
 #include <ib_cm.h>
+#include "cm_msgs.h"
 
 MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("InfiniBand CM");
@@ -55,30 +59,141 @@
 static struct ib_cm {
 	spinlock_t lock;
 	struct rb_root service_table;
+	u32 id;
 } cm;
 
 struct cm_port {
 	struct ib_mad_agent *mad_agent;
+	u64 ca_guid;
+	spinlock_t lock;
+	struct ib_mr *mr;
+	u8 port_num;
+};
+
+struct cm_msg {
+	struct cm_id_private *cm_id_priv;
+	struct ib_send_wr send_wr;
+	struct ib_sge sge;
+	u8 retry;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
+	struct ib_mad mad;
 };
 
-struct ib_cm_id_private {
+struct cm_id_private {
 	struct ib_cm_id	id;
 
 	struct rb_node node;
 	spinlock_t lock;
 	wait_queue_head_t wait;
 	atomic_t refcount;
+
+	struct cm_port *port;
+	struct cm_msg *msg;
+
+	struct ib_ah_attr ah_attr;
+	u16 pkey_index;
+	u32 remote_qpn;
+	int timeout_ms;
+	u8 max_cm_retries;
 };
 
-static struct ib_cm_id_private *find_cm_service(u64 service_id)
+static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+			struct cm_msg *msg)
+{
+	struct ib_mad_agent *mad_agent;
+	int ret;
+
+	msg = kmalloc(sizeof *msg, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+	memset(msg, 0, sizeof *msg);
+
+	/* need address handle cache... */
+	mad_agent = cm_id_priv->port->mad_agent;
+	msg->send_wr.wr.ud.ah = ib_create_ah(mad_agent->qp->pd,
+					     &cm_id_priv->ah_attr);
+	if (IS_ERR(msg->send_wr.wr.ud.ah)) {
+		ret = PTR_ERR(msg->send_wr.wr.ud.ah);
+		kfree(msg);
+		return ret;
+	}
+
+	msg->sge.addr = dma_map_single(mad_agent->device->dma_device,
+				       &msg->mad, sizeof msg->mad,
+				       DMA_TO_DEVICE);
+	pci_unmap_addr_set(msg, mapping, msg->sge.addr);
+	msg->sge.length = sizeof msg->mad;
+	msg->sge.lkey = cm_id_priv->port->mr->lkey;
+
+	msg->send_wr.wr_id = (unsigned long) msg;
+	msg->send_wr.sg_list = &msg->sge;
+	msg->send_wr.num_sge = 1;
+	msg->send_wr.opcode = IB_WR_SEND;
+	msg->send_wr.send_flags = IB_SEND_SIGNALED;
+	msg->send_wr.wr.ud.mad_hdr = &msg->mad.mad_hdr;
+	msg->send_wr.wr.ud.remote_qpn = 1; /* IB_QP1; use host order for now... */
+	msg->send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
+	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
+	msg->send_wr.wr.ud.pkey_index = cm_id_priv->pkey_index;
+
+	atomic_inc(&cm_id_priv->refcount);
+	msg->cm_id_priv = cm_id_priv;
+	return 0;
+}
+
+static void cm_free_msg(struct cm_msg *msg)
+{
+	ib_destroy_ah(msg->send_wr.wr.ud.ah);
+	dma_unmap_single(msg->cm_id_priv->port->mad_agent->device->dma_device,
+			 pci_unmap_addr(msg, mapping), sizeof msg->mad,
+			 DMA_TO_DEVICE);
+
+	if (atomic_dec_and_test(&msg->cm_id_priv->refcount))
+		wake_up(&msg->cm_id_priv->wait);
+	kfree(msg);
+}
+
+static void cm_set_ah_attr(struct ib_ah_attr *ah_attr,
+			   struct cm_port *port,
+			   struct ib_sa_path_rec *path_rec)
+{
+	memset(ah_attr, 0, sizeof *ah_attr);
+	ah_attr->dlid = be16_to_cpu(path_rec->dlid);
+	ah_attr->sl = path_rec->sl;
+	ah_attr->src_path_bits = path_rec->slid & 0x7F;
+	ah_attr->port_num = port->port_num;
+}
+
+static struct cm_port * cm_find_port(struct ib_device *device,
+				     union ib_gid *gid)
+{
+	struct cm_port *port;
+	int ret;
+	u8 p;
+
+	port = (struct cm_port *)ib_get_client_data(device, &cm_client);
+	if (!port)
+		return NULL;
+	
+	ret = ib_find_cached_gid(device, gid, &p, NULL);
+	if (ret)
+		port = NULL;
+	else
+		port = &port[p-1];
+
+	return port;
+}
+
+static struct cm_id_private *cm_find_service(u64 service_id)
 {
 	struct rb_node *node = cm.service_table.rb_node;
-	struct ib_cm_id_private *cm_id_priv;
+	struct cm_id_private *cm_id_priv;
 
 	while (node) {
-		cm_id_priv = rb_entry(node, struct ib_cm_id_private, node);
+		cm_id_priv = rb_entry(node, struct cm_id_private, node);
 		if ((cm_id_priv->id.service_mask & service_id) ==
-		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
+		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id) &&
+		    (cm_id_priv->id.state != IB_CM_IDLE))
 		    return cm_id_priv;
 
 		if (service_id < cm_id_priv->id.service_id)
@@ -89,17 +204,16 @@
 	return NULL;
 }
 
-static void insert_cm_service(struct ib_cm_id_private *cm_id_priv)
+static void cm_insert_service(struct cm_id_private *cm_id_priv)
 {
 	struct rb_node **link = &cm.service_table.rb_node;
 	struct rb_node *parent = NULL;
-	struct ib_cm_id_private *cur_cm_id_priv;
+	struct cm_id_private *cur_cm_id_priv;
 	u64 service_id = cm_id_priv->id.service_id;
 
 	while (*link) {
 		parent = *link;
-		cur_cm_id_priv = rb_entry(parent, struct ib_cm_id_private,
-					  node);
+		cur_cm_id_priv = rb_entry(parent, struct cm_id_private, node);
 		if (service_id < cur_cm_id_priv->id.service_id)
 			link = &(*link)->rb_left;
 		else
@@ -109,10 +223,22 @@
 	rb_insert_color(&cm_id_priv->node, &cm.service_table);
 }
 
+static u32 cm_get_id(void)
+{
+	unsigned long flags;
+	u32 id;
+
+	/* need to check for wrapping... */
+	spin_lock_irqsave(&cm.lock, flags);
+	id = cm.id++;
+	spin_unlock_irqrestore(&cm.lock, flags);
+	return id;
+}
+
 struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
 				 void *context)
 {
-	struct ib_cm_id_private *cm_id_priv;
+	struct cm_id_private *cm_id_priv;
 
 	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
 	if (!cm_id_priv)
@@ -122,6 +248,7 @@
 	cm_id_priv->id.state = IB_CM_IDLE;
 	cm_id_priv->id.cm_handler = cm_handler;
 	cm_id_priv->id.context = context;
+	cm_id_priv->id.local_id = cm_get_id();
 
 	spin_lock_init(&cm_id_priv->lock);
 	init_waitqueue_head(&cm_id_priv->wait);
@@ -131,7 +258,7 @@
 }
 EXPORT_SYMBOL(ib_create_cm_id);
 
-static void reset_cm_state(struct ib_cm_id_private *cm_id_priv)
+static void reset_cm_state(struct cm_id_private *cm_id_priv)
 {
 	/* reject connections if establishing */
 	/* disconnect established connections */
@@ -140,17 +267,17 @@
 
 int ib_destroy_cm_id(struct ib_cm_id *cm_id)
 {
-	struct ib_cm_id_private *cm_id_priv;
+	struct cm_id_private *cm_id_priv;
 	unsigned long flags, flags2;
 
-	cm_id_priv = container_of(cm_id, struct ib_cm_id_private, id);
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch(cm_id->state) {
 	case IB_CM_LISTEN:
 		spin_lock_irqsave(&cm.lock, flags2);
 		rb_erase(&cm_id_priv->node, &cm.service_table);
-		spin_lock_irqrestore(&cm.lock, flags2);
+		spin_unlock_irqrestore(&cm.lock, flags2);
 		break;
 	case IB_CM_IDLE:
 		break;
@@ -174,11 +301,11 @@
 		 u64 service_id,
 		 u64 service_mask)
 {
-	struct ib_cm_id_private *cm_id_priv;
+	struct cm_id_private *cm_id_priv;
 	unsigned long flags;
 	int ret = 0;
 
-	cm_id_priv = container_of(cm_id, struct ib_cm_id_private, id);
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_IDLE) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -192,53 +319,368 @@
 	cm_id->service_mask = service_mask ? service_mask : ~0ULL;
 
 	spin_lock_irqsave(&cm.lock, flags);
-	if (find_cm_service(service_id)) {
-		/* No one else is able to change the cm_id state. */
-		cm_id->state = IB_CM_IDLE;
+	if (cm_find_service(service_id)) {
+		/* No one else is able to change the cm_id_priv state. */
+		cm_id_priv->id.state = IB_CM_IDLE;
 		ret = -EBUSY;
 	} else
-		insert_cm_service(cm_id_priv);
+		cm_insert_service(cm_id_priv);
 	spin_unlock_irqrestore(&cm.lock, flags);
 out:
 	return ret;
 }
 EXPORT_SYMBOL(ib_cm_listen);
 
+static inline int cm_convert_to_ms(u8 iba_time)
+{
+	/* approximate conversion to ms from 4.096us x 2^iba_time */
+	return (1 << ((uint)max(iba_time, (u8) 8) - (uint)8));
+}
+
+static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
+			      struct cm_id_private *cm_id_priv,
+			      enum cm_msg_attr_id attr_id,
+			      enum cm_msg_sequence msg_seq)
+{
+	u64 hi_tid, low_tid;
+
+	hdr->base_version  = IB_MGMT_BASE_VERSION;
+	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
+	hdr->class_version = IB_CM_CLASS_VERSION;
+	hdr->method	   = IB_MGMT_METHOD_SEND;
+	hdr->attr_id	   = attr_id;
+
+	hi_tid   = ((u64) cm_id_priv->port->mad_agent->hi_tid) << 32;
+	low_tid  = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
+	hdr->tid = cpu_to_be64(hi_tid | low_tid);
+}
+
+static void cm_format_req(struct cm_req_msg *req_msg,
+			  struct cm_id_private *cm_id_priv,
+			  struct ib_cm_req_param *param)
+{
+	cm_format_mad_hdr(&req_msg->hdr, cm_id_priv,
+			  CM_REQ_ATTR_ID, CM_MSG_SEQUENCE_REQ);
+
+	req_msg->local_comm_id = cm_id_priv->id.local_id;
+	req_msg->service_id = param->service_id;
+	req_msg->local_ca_guid = cm_id_priv->port->ca_guid;
+	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp->qp_num));
+	cm_req_set_resp_res(req_msg, param->responder_resources);
+	cm_req_set_init_depth(req_msg, param->initiator_depth);
+	cm_req_set_remote_resp_timeout(req_msg,
+				       param->remote_cm_response_timeout);
+	cm_req_set_qp_type(req_msg, param->qp->qp_type);
+	cm_req_set_flow_ctrl(req_msg, param->flow_control);
+	cm_req_set_starting_psn(req_msg, param->starting_send_psn);
+	cm_req_set_local_resp_timeout(req_msg,
+				      param->local_cm_response_timeout);
+	cm_req_set_retry_count(req_msg, param->retry_count);
+	req_msg->pkey = param->primary_path->pkey;
+	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
+	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
+	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
+	cm_req_set_srq(req_msg, (param->qp->srq != NULL));
+
+	req_msg->primary_local_lid = param->primary_path->slid;
+	req_msg->primary_remote_lid = param->primary_path->dlid;
+	req_msg->primary_local_gid = param->primary_path->sgid;
+	req_msg->primary_remote_gid = param->primary_path->dgid;
+	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
+	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
+	req_msg->primary_traffic_class = param->primary_path->traffic_class;
+	req_msg->primary_hop_limit = param->primary_path->hop_limit;
+	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
+	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
+	cm_req_set_primary_local_ack_timeout(req_msg,
+		min(31, param->primary_path->packet_life_time + 1));
+
+	if (param->alternate_path) {
+		req_msg->alt_local_lid = param->alternate_path->slid;
+		req_msg->alt_remote_lid = param->alternate_path->dlid;
+		req_msg->alt_local_gid = param->alternate_path->sgid;
+		req_msg->alt_remote_gid = param->alternate_path->dgid;
+		cm_req_set_alt_flow_label(req_msg,
+					  param->alternate_path->flow_label);
+		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
+		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
+		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
+		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
+		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
+		cm_req_set_alt_local_ack_timeout(req_msg,
+			min(31, param->alternate_path->packet_life_time + 1));
+	}
+
+	if (param->private_data && param->private_data_len)
+		memcpy(req_msg->private_data, param->private_data,
+		       param->private_data_len);
+}
+
+static inline int cm_validate_req_param(struct ib_cm_req_param *param)
+{
+	if (!param->qp || !param->primary_path)
+		return -EINVAL;
+
+	if (param->qp->qp_type != IB_QPT_RC && param->qp->qp_type != IB_QPT_UC)
+		return -EINVAL;
+
+	if (param->private_data &&
+	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
 int ib_send_cm_req(struct ib_cm_id *cm_id,
 		   struct ib_cm_req_param *param)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	struct ib_send_wr *bad_send_wr;
+	unsigned long flags;
+	int ret;
+
+	ret = cm_validate_req_param(param);
+	if (ret)
+		goto out;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	cm_id_priv->port = cm_find_port(param->qp->device,
+					&param->primary_path->sgid);
+	if (!cm_id_priv->port) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ib_find_cached_pkey(param->qp->device,
+				  cm_id_priv->port->port_num,
+				  param->primary_path->pkey,
+				  &cm_id_priv->pkey_index);
+	if (ret)
+		goto out;
+
+	cm_id_priv->timeout_ms = cm_convert_to_ms(
+				    param->primary_path->packet_life_time) * 2 +
+				 cm_convert_to_ms(
+				    param->remote_cm_response_timeout);
+	cm_id_priv->max_cm_retries = param->max_cm_retries;
+	cm_set_ah_attr(&cm_id_priv->ah_attr, cm_id_priv->port,
+		       param->primary_path);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_req((struct cm_req_msg *)&msg->mad, cm_id_priv, param);
+
+	if (param->peer_to_peer) {
+		spin_lock_irqsave(&cm.lock, flags);
+		cm_insert_service(cm_id_priv);
+		spin_unlock_irqrestore(&cm.lock, flags);
+	}
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_IDLE)
+		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+				       &msg->send_wr, &bad_send_wr);
+	else
+		ret = -EINVAL;
+
+	if (ret) {
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		cm_free_msg(msg);
+		goto out;
+	}
+
+	cm_id->state = IB_CM_REQ_SENT;
+	cm_id_priv->msg = msg;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_req);
 
+static void cm_format_rep(struct cm_rep_msg *rep_msg,
+			  struct cm_id_private *cm_id_priv,
+			  struct ib_cm_rep_param *param)
+{
+	cm_format_mad_hdr(&rep_msg->hdr, cm_id_priv,
+			  CM_REP_ATTR_ID, CM_MSG_SEQUENCE_REQ);
+
+	rep_msg->local_comm_id = cm_id_priv->id.local_id;
+	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	cm_rep_set_local_qpn(rep_msg, param->qp->qp_num);
+	cm_rep_set_starting_psn(rep_msg, param->starting_send_psn);
+	rep_msg->resp_resources = param->responder_resources;
+	rep_msg->initiator_depth = param->initiator_depth;
+	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
+	cm_rep_set_failover(rep_msg, param->failover_accepted);
+	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
+	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
+	cm_rep_set_srq(rep_msg, (param->qp->srq != NULL));
+	rep_msg->local_ca_guid = cm_id_priv->port->ca_guid;
+
+	if (param->private_data && param->private_data_len)
+		memcpy(rep_msg->private_data, param->private_data,
+		       param->private_data_len);
+}
+
+static inline int cm_validate_rep_param(struct ib_cm_rep_param *param)
+{
+	if (!param->qp)
+		return -EINVAL;
+
+	if (param->qp->qp_type != IB_QPT_RC && param->qp->qp_type != IB_QPT_UC)
+		return -EINVAL;
+
+	if (param->private_data &&
+	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
 int ib_send_cm_rep(struct ib_cm_id *cm_id,
-		   struct ib_cm_req_param *param)
+		   struct ib_cm_rep_param *param)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	ret = cm_validate_rep_param(param);
+	if (ret)
+		goto out;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_rep((struct cm_rep_msg *)&msg->mad, cm_id_priv, param);
+	
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_rep);
 
+static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
+			  struct cm_id_private *cm_id_priv,
+			  void *private_data,
+			  u8 private_data_len)
+{
+	cm_format_mad_hdr(&rtu_msg->hdr, cm_id_priv,
+			  CM_RTU_ATTR_ID, CM_MSG_SEQUENCE_REQ);
+
+	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
+	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
+
+	if (private_data && private_data_len)
+		memcpy(rtu_msg->private_data, private_data, private_data_len);
+}
+
 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
 		   void *private_data,
 		   u8 private_data_len)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_rtu((struct cm_rtu_msg *)&msg->mad, cm_id_priv,
+		      private_data, private_data_len);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_rtu);
 
+static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
+			  struct cm_id_private *cm_id_priv,
+			  void *private_data,
+			  u8 private_data_len)
+{
+	cm_format_mad_hdr(&dreq_msg->hdr, cm_id_priv,
+			  CM_DREQ_ATTR_ID, CM_MSG_SEQUENCE_DREQ);
+
+	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
+	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
+
+	if (private_data && private_data_len)
+		memcpy(dreq_msg->private_data, private_data, private_data_len);
+}
+
 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
 		    void *private_data,
 		    u8 private_data_len)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_dreq((struct cm_dreq_msg *)&msg->mad, cm_id_priv,
+		       private_data, private_data_len);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_dreq);
 
+static void cm_format_drep(struct cm_drep_msg *drep_msg,
+			  struct cm_id_private *cm_id_priv,
+			  void *private_data,
+			  u8 private_data_len)
+{
+	cm_format_mad_hdr(&drep_msg->hdr, cm_id_priv,
+			  CM_DREP_ATTR_ID, CM_MSG_SEQUENCE_DREQ);
+
+	drep_msg->local_comm_id = cm_id_priv->id.local_id;
+	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+
+	if (private_data && private_data_len)
+		memcpy(drep_msg->private_data, private_data, private_data_len);
+}
+
 int ib_send_cm_drep(struct ib_cm_id *cm_id,
 		    void *private_data,
 		    u8 private_data_len)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_drep((struct cm_drep_msg *)&msg->mad, cm_id_priv,
+		       private_data, private_data_len);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_drep);
 
@@ -248,6 +690,42 @@
 }
 EXPORT_SYMBOL(ib_cm_establish);
 
+static void cm_format_rej(struct cm_rej_msg *rej_msg,
+			  struct cm_id_private *cm_id_priv,
+			  enum ib_cm_rej_reason reason,
+			  void *ari,
+			  u8 ari_length,
+			  void *private_data,
+			  u8 private_data_len)
+{
+	cm_format_mad_hdr(&rej_msg->hdr, cm_id_priv,
+			  CM_REJ_ATTR_ID, CM_MSG_SEQUENCE_REQ);
+
+	rej_msg->local_comm_id = cm_id_priv->id.local_id;
+	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
+
+	switch(cm_id_priv->id.state) {
+	case IB_CM_REQ_RCVD:
+		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+		break;
+	case IB_CM_REP_RCVD:
+		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
+		break;
+	default:
+		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
+		break;
+	}
+
+	rej_msg->reason = reason;
+	if (ari && ari_length) {
+		cm_rej_set_reject_info_len(rej_msg, ari_length);
+		memcpy(rej_msg->ari, ari, ari_length);
+	}
+
+	if (private_data && private_data_len)
+		memcpy(rej_msg->private_data, private_data, private_data_len);
+}
+
 int ib_send_cm_rej(struct ib_cm_id *cm_id,
 		   enum ib_cm_rej_reason reason,
 		   void *ari,
@@ -255,28 +733,134 @@
 		   void *private_data,
 		   u8 private_data_len)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
+	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_rej((struct cm_rej_msg *)&msg->mad, cm_id_priv, reason,
+		      ari, ari_length, private_data, private_data_len);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_rej);
 
+static void cm_format_mra(struct cm_mra_msg *mra_msg,
+			  struct cm_id_private *cm_id_priv,
+			  u8 service_timeout,
+			  void *private_data,
+			  u8 private_data_len)
+{
+	enum cm_msg_sequence msg_sequence;
+
+	switch(cm_id_priv->id.state) {
+	case IB_CM_REQ_RCVD:
+		msg_sequence = CM_MSG_SEQUENCE_REQ;
+		cm_mra_set_msg_mraed(mra_msg, CM_MSG_RESPONSE_REQ);
+		break;
+	case IB_CM_REP_RCVD:
+		msg_sequence = CM_MSG_SEQUENCE_REQ;
+		cm_mra_set_msg_mraed(mra_msg, CM_MSG_RESPONSE_REP);
+		break;
+	case IB_CM_ESTABLISHED:
+		msg_sequence = CM_MSG_SEQUENCE_LAP;
+		cm_mra_set_msg_mraed(mra_msg, CM_MSG_RESPONSE_OTHER);
+		break;
+	default:
+		msg_sequence = CM_MSG_SEQUENCE_REQ;
+		break;
+	}
+	cm_format_mad_hdr(&mra_msg->hdr, cm_id_priv,
+			  CM_MRA_ATTR_ID, msg_sequence);
+
+	mra_msg->local_comm_id = cm_id_priv->id.local_id;
+	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	cm_mra_set_service_timeout(mra_msg, service_timeout);
+
+	if (private_data && private_data_len)
+		memcpy(mra_msg->private_data, private_data, private_data_len);
+}
+
 int ib_send_cm_mra(struct ib_cm_id *cm_id,
 		   u8 service_timeout,
 		   void *private_data,
 		   u8 private_data_len)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_mra((struct cm_mra_msg *)&msg->mad, cm_id_priv,
+		      service_timeout, private_data, private_data_len);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_mra);
 
+/*
+static void cm_format_lap(struct cm_lap_msg *lap_msg,
+			  struct cm_id_private *cm_id_priv,
+			  struct ib_sa_path_rec *alternate_path,
+			  void *private_data,
+			  u8 private_data_len);
+{
+	cm_format_mad_hdr(&lap_msg->hdr, cm_id_priv,
+			  CM_LAP_ATTR_ID, CM_MSG_SEQUENCE_LAP);
+
+	if (private_data && private_data_len)
+		memcpy(lap_msg->private_data, private_data, private_data_len);
+}
+*/
+
 int ib_send_cm_lap(struct ib_cm_id *cm_id,
-		   struct ib_path_record *alternate_path,
+		   struct ib_sa_path_rec *alternate_path,
 		   void *private_data,
 		   u8 private_data_len)
 {
+	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
+		return -EINVAL;
+
 	return -EINVAL;
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
+/*
+static void cm_format_apr(struct cm_apr_msg *apr_msg,
+			  struct cm_id_private *cm_id_priv,
+			  enum ib_cm_apr_status status,
+			  void *info,
+			  u8 info_length,
+			  void *private_data,
+			  u8 private_data_len);
+{
+	cm_format_mad_hdr(&apr_msg->hdr, cm_id_priv,
+			  CM_APR_ATTR_ID, CM_MSG_SEQUENCE_LAP);
+
+	if (private_data && private_data_len)
+		memcpy(apr_msg->private_data, private_data, private_data_len);
+}
+*/
+
 int ib_send_cm_apr(struct ib_cm_id *cm_id,
 		   enum ib_cm_apr_status status,
 		   void *info,
@@ -284,30 +868,209 @@
 		   void *private_data,
 		   u8 private_data_len)
 {
+	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
+	    (info && info_length > IB_CM_APR_INFO_LENGTH))
+		return -EINVAL;
+
 	return -EINVAL;
 }
 EXPORT_SYMBOL(ib_send_cm_apr);
 
+static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
+			       struct cm_id_private *cm_id_priv,
+			       struct ib_cm_sidr_req_param *param)
+{
+	cm_format_mad_hdr(&sidr_req_msg->hdr, cm_id_priv,
+			  CM_SIDR_REQ_ATTR_ID, CM_MSG_SEQUENCE_SIDR);
+
+	sidr_req_msg->request_id = cm_id_priv->id.local_id;
+	sidr_req_msg->pkey = param->pkey;
+	sidr_req_msg->service_id = param->service_id;
+
+	if (param->private_data && param->private_data_len)
+		memcpy(sidr_req_msg->private_data, param->private_data,
+		       param->private_data_len);
+}
+
 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 			struct ib_cm_sidr_req_param *param)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if (!param->path ||
+	    (param->private_data && 
+	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_sidr_req((struct cm_sidr_req_msg *)&msg->mad, cm_id_priv,
+			   param);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_sidr_req);
 
+static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
+			       struct cm_id_private *cm_id_priv,
+			       struct ib_cm_sidr_rep_param *param)
+{
+	cm_format_mad_hdr(&sidr_rep_msg->hdr, cm_id_priv,
+			  CM_SIDR_REP_ATTR_ID, CM_MSG_SEQUENCE_SIDR);
+
+	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
+	sidr_rep_msg->status = param->status;
+	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
+	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
+	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
+
+	if (param->info && param->info_length)
+		memcpy(sidr_rep_msg->info, param->info, param->info_length);
+
+	if (param->private_data && param->private_data_len)
+		memcpy(sidr_rep_msg->private_data, param->private_data,
+		       param->private_data_len);
+}
+
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
 			struct ib_cm_sidr_rep_param *param)
 {
-	return -EINVAL;
+	struct cm_id_private *cm_id_priv;
+	struct cm_msg *msg = NULL;
+	int ret;
+
+	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
+	    (param->private_data &&
+	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
+		return -EINVAL;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	ret = cm_alloc_msg(cm_id_priv, msg);
+	if (ret)
+		goto out;
+
+	cm_format_sidr_rep((struct cm_sidr_rep_msg *)&msg->mad, cm_id_priv,
+			   param);
+
+	ret = -EINVAL;
+out:
+	return ret;
 }
 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
 
+/*
+static inline void cm_notify_client(struct cm_id_private *cm_id_priv,
+				    struct ib_cm_event *event)
+{
+	cm_id_priv->id.cm_handler(&cm_id_priv->id, event);
+
+	if (atomic_dec_and_test(&cm_id_priv->refcount))
+		wake_up(&msg->cm_id_priv->wait);
+}
+*/
+
+static void cm_process_send_error(struct cm_msg *msg,
+				  enum ib_wc_status wc_status)
+{
+	struct cm_id_private *cm_id_priv;
+	struct ib_cm_event cm_event;
+	unsigned long flags;
+
+	memset(&cm_event, 0, sizeof cm_event);
+	cm_id_priv = msg->cm_id_priv;
+
+	/* Discard old sends. */
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (msg != cm_id_priv->msg)
+		goto discard;
+
+	switch (cm_id_priv->id.state) {
+	case IB_CM_REQ_SENT:
+		cm_id_priv->id.state = IB_CM_IDLE;
+		cm_event.event = IB_CM_REQ_ERROR;
+		break;
+	case IB_CM_REP_SENT:
+		cm_id_priv->id.state = IB_CM_IDLE;
+		cm_event.event = IB_CM_REP_ERROR;
+		break;
+	case IB_CM_DREQ_SENT:
+		cm_id_priv->id.state = IB_CM_TIMEWAIT;
+		cm_event.event = IB_CM_DREQ_ERROR;
+		break;
+	case IB_CM_SIDR_REQ_SENT:
+		cm_id_priv->id.state = IB_CM_IDLE;
+		cm_event.event = IB_CM_SIDR_REQ_ERROR;
+		break;
+	default:
+		BUG_ON(1);
+		goto discard;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	cm_event.param.send_status = wc_status;
+	cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
+	cm_free_msg(msg);
+	return;
+
+discard:
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	cm_free_msg(msg);
+}
+
+static void cm_process_send_timeout(struct cm_msg *msg)
+{
+	struct cm_id_private *cm_id_priv;
+	struct ib_send_wr *bad_send_wr;
+	unsigned long flags;
+	int ret;
+
+	cm_id_priv = msg->cm_id_priv;
+
+	/* Discard old sends. */
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (msg != cm_id_priv->msg) {
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		cm_free_msg(msg);
+		return;
+	}
+
+	if (msg->retry++ < cm_id_priv->max_cm_retries) {
+		ret = ib_post_send_mad(cm_id_priv->port->mad_agent,
+				       &msg->send_wr, &bad_send_wr);
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		if (ret)
+			cm_process_send_error(msg, IB_WC_GENERAL_ERR);
+	} else {
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		cm_process_send_error(msg, IB_WC_RESP_TIMEOUT_ERR);
+	}
+}
+
 static void send_handler(struct ib_mad_agent *mad_agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
-	struct cm_port *port;
+	struct cm_msg *msg;
 
-	port = (struct cm_port *)mad_agent->context;
+	msg = (struct cm_msg *)(unsigned long)mad_send_wc->wr_id;
+
+	switch (mad_send_wc->status) {
+	case IB_WC_SUCCESS:
+	case IB_WC_WR_FLUSH_ERR:
+		cm_free_msg(msg);
+		break;
+	case IB_WC_RESP_TIMEOUT_ERR:
+		cm_process_send_timeout(msg);
+		break;
+	default:
+		cm_process_send_error(msg, mad_send_wc->status);
+		break;
+	}
 }
 
 static void recv_handler(struct ib_mad_agent *mad_agent,
@@ -316,17 +1079,40 @@
 	struct cm_port *port;
 
 	port = (struct cm_port *)mad_agent->context;
+
+	ib_free_recv_mad(mad_recv_wc);
+}
+
+static u64 cm_get_ca_guid(struct ib_device *device)
+{
+	struct ib_device_attr *device_attr;
+	u64 guid;
+	int ret;
+
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr)
+		return 0;
+
+	ret = ib_query_device(device, device_attr);
+	guid = ret ? 0 : device_attr->node_guid;
+	kfree(device_attr);
+	return guid;
 }
 
 static void cm_add_one(struct ib_device *device)
 {
-	struct cm_port *port;
+	struct cm_port *port_array, *port;
 	struct ib_mad_reg_req reg_req;
+	u64 ca_guid;
 	u8 i;
 
-	port = kmalloc(sizeof *port * device->phys_port_cnt, GFP_KERNEL);
-	if (!port)
-		goto out;
+	ca_guid = cm_get_ca_guid(device);
+	if (!ca_guid)
+		return;
+
+	port_array = kmalloc(sizeof *port * device->phys_port_cnt, GFP_KERNEL);
+	if (!port_array)
+		return;
 
 	memset(&reg_req, 0, sizeof reg_req);
 	reg_req.mgmt_class = IB_MGMT_CLASS_CM;
@@ -335,34 +1121,53 @@
 	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 	set_bit(IB_MGMT_METHOD_GET_RESP, reg_req.method_mask);
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
-	for (i = 1; i <= device->phys_port_cnt; i++) {
-		port[i].mad_agent = ib_register_mad_agent(device, i,
-							  IB_QPT_GSI,
-							  &reg_req,
-							  0,
-							  send_handler,
-							  recv_handler,
-							  &port[i]);
+	for (i = 1, port = port_array; i <= device->phys_port_cnt; i++, port++){
+		spin_lock_init(&port->lock);
+		port->ca_guid = ca_guid;
+		port->port_num = i;
+		port->mad_agent = ib_register_mad_agent(device, i,
+							IB_QPT_GSI,
+							&reg_req,
+							0,
+							send_handler,
+							recv_handler,
+							port);
+		if (IS_ERR(port->mad_agent))
+			goto error;
+
+		port->mr = ib_get_dma_mr(port->mad_agent->qp->pd,
+					 IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(port->mr)) {
+			ib_unregister_mad_agent(port->mad_agent);
+			goto error;
+		}
 	}
+	ib_set_client_data(device, &cm_client, port_array);
+	return;
 
-out:
-	ib_set_client_data(device, &cm_client, port);
+error:
+	while (port != port_array) {
+		--port;
+		ib_dereg_mr(port->mr);
+		ib_unregister_mad_agent(port->mad_agent);
+	}
+	kfree(port_array);
 }
 
 static void cm_remove_one(struct ib_device *device)
 {
-	struct cm_port *port;
+	struct cm_port *port_array, *port;
 	int i;
 
-	port = (struct cm_port *)ib_get_client_data(device, &cm_client);
-	if (!port)
+	port_array = (struct cm_port *)ib_get_client_data(device, &cm_client);
+	if (!port_array)
 		return;
 
-	for (i = 1; i <= device->phys_port_cnt; i++) {
-		if (!IS_ERR(port[i].mad_agent))
-			ib_unregister_mad_agent(port[i].mad_agent);
+	for (i = 1, port = port_array; i <= device->phys_port_cnt; i++, port++){
+		ib_dereg_mr(port->mr);
+		ib_unregister_mad_agent(port->mad_agent);
 	}
-	kfree(port);
+	kfree(port_array);
 }
 
 static int __init ib_cm_init(void)
Index: core/cm_msgs.h
===================================================================
--- core/cm_msgs.h	(revision 0)
+++ core/cm_msgs.h	(revision 0)
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if !defined(CM_MSGS_H)
+#define CM_MSGS_H
+
+#include <ib_mad.h>
+
+/*
+ * Parameters to routines below should be in network-byte order, and values
+ * are returned in network-byte order.
+ */
+
+#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
+
+/*
+ * CM MAD attribute IDs.  Pre-converted to network byte order so they can
+ * be compared/assigned directly against the on-the-wire ib_mad_hdr.attr_id.
+ */
+enum cm_msg_attr_id {
+	CM_REQ_ATTR_ID	    = __constant_htons(0x0010),
+	CM_MRA_ATTR_ID	    = __constant_htons(0x0011),
+	CM_REJ_ATTR_ID	    = __constant_htons(0x0012),
+	CM_REP_ATTR_ID	    = __constant_htons(0x0013),
+	CM_RTU_ATTR_ID	    = __constant_htons(0x0014),
+	CM_DREQ_ATTR_ID	    = __constant_htons(0x0015),
+	CM_DREP_ATTR_ID	    = __constant_htons(0x0016),
+	CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
+	CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
+	CM_LAP_ATTR_ID      = __constant_htons(0x0019),
+	CM_APR_ATTR_ID      = __constant_htons(0x001A)
+};
+
+/*
+ * NOTE(review): presumably identifies the independent message sequences a
+ * connection can carry (REQ/REP, LAP/APR, DREQ/DREP, SIDR) — confirm
+ * against the transaction-ID handling in cm.c.
+ */
+enum cm_msg_sequence {
+	CM_MSG_SEQUENCE_REQ,
+	CM_MSG_SEQUENCE_LAP,
+	CM_MSG_SEQUENCE_DREQ,
+	CM_MSG_SEQUENCE_SIDR
+};
+
+/*
+ * REQ message wire format (attribute 0x0010).  Multi-byte fields are
+ * big-endian on the wire.  Fields that pack several sub-fields into one
+ * byte/word are named offsetNN (NN = byte offset within the MAD data);
+ * the bit layout is noted above each one — always go through the
+ * accessors below rather than touching them directly.
+ */
+struct cm_req_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 rsvd4;
+	u64 service_id;
+	u64 local_ca_guid;
+	u32 rsvd24;
+	u32 local_qkey;
+	/* local QPN:24, responder resources:8 */
+	u32 offset32;
+	/* local EECN:24, initiator depth:8 */
+	u32 offset36;
+	/*
+	 * remote EECN:24, remote CM response timeout:5,
+	 * transport service type:2, end-to-end flow control:1
+	 */
+	u32 offset40;
+	/* starting PSN:24, local CM response timeout:5, retry count:3 */
+	u32 offset44;
+	u16 pkey;
+	/* path MTU:4, RDC exists:1, RNR retry count:3. */
+	u8 offset50;
+	/* max CM Retries:4, SRQ:1, rsvd:3 */
+	u8 offset51;
+
+	u16 primary_local_lid;
+	u16 primary_remote_lid;
+	union ib_gid primary_local_gid;
+	union ib_gid primary_remote_gid;
+	/* flow label:20, rsvd:6, packet rate:6 */
+	u32 primary_offset88;
+	u8 primary_traffic_class;
+	u8 primary_hop_limit;
+	/* SL:4, subnet local:1, rsvd:3 */
+	u8 primary_offset94;
+	/* local ACK timeout:5, rsvd:3 */
+	u8 primary_offset95;
+
+	u16 alt_local_lid;
+	u16 alt_remote_lid;
+	union ib_gid alt_local_gid;
+	union ib_gid alt_remote_gid;
+	/* flow label:20, rsvd:6, packet rate:6 */
+	u32 alt_offset132;
+	u8 alt_traffic_class;
+	u8 alt_hop_limit;
+	/* SL:4, subnet local:1, rsvd:3 */
+	u8 alt_offset138;
+	/* local ACK timeout:5, rsvd:3 */
+	u8 alt_offset139;
+
+	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/*
+ * Accessors for the bit-fields packed into offset32/offset36/offset40.
+ * Per the file-top convention, 24-bit wire values (QPN, PSN) are passed
+ * and returned in network byte order; small bit-fields (counts,
+ * timeouts) are plain host-order values.
+ */
+
+/* Local QPN: top 24 bits of offset32; returned in network byte order. */
+static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
+{
+	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
+}
+
+static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
+{
+	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
+					 (be32_to_cpu(req_msg->offset32) &
+					  0x000000FF));
+}
+
+/* Responder resources: low byte of offset32. */
+static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
+{
+	return (u8) be32_to_cpu(req_msg->offset32);
+}
+
+static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
+{
+	req_msg->offset32 = cpu_to_be32(resp_res |
+					(be32_to_cpu(req_msg->offset32) &
+					 0xFFFFFF00));
+}
+
+/* Initiator depth: low byte of offset36. */
+static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
+{
+	return (u8) be32_to_cpu(req_msg->offset36);
+}
+
+static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
+					 u8 init_depth)
+{
+	req_msg->offset36 = cpu_to_be32(init_depth |
+					(be32_to_cpu(req_msg->offset36) &
+					 0xFFFFFF00));
+}
+
+/* Remote CM response timeout: bits 3-7 of the low byte of offset40. */
+static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
+{
+	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
+}
+
+static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
+						  u8 resp_timeout)
+{
+	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
+					 (be32_to_cpu(req_msg->offset40) &
+					  0xFFFFFF07));
+}
+
+/*
+ * Transport service type: bits 1-2 of offset40 (0 = RC, 1 = UC).
+ * NOTE(review): the default arm returns 0 for reserved encodings —
+ * confirm callers treat that as "invalid" rather than a real QP type.
+ */
+static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
+{
+	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
+	switch(transport_type) {
+	case 0: return IB_QPT_RC;
+	case 1: return IB_QPT_UC;
+	default: return 0;
+	}
+}
+
+/*
+ * Encode the transport service type into bits 1-2 of offset40
+ * (0 = RC, 1 = UC), preserving all other bits of the word.
+ *
+ * Fix: the IB_QPT_UC case was missing a break and fell through to the
+ * default arm, which immediately cleared the UC bit it had just set —
+ * every REQ would go out encoded as RC.
+ */
+static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
+				      enum ib_qp_type qp_type)
+{
+	switch(qp_type) {
+	case IB_QPT_UC:
+		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
+						  req_msg->offset40) &
+						   0xFFFFFFF9) | 0x2);
+		break;
+	default:
+		/* RC and anything unrecognized: clear both type bits. */
+		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
+						 req_msg->offset40) &
+						  0xFFFFFFF9);
+	}
+}
+
+/* End-to-end flow control: bit 0 of offset40. */
+static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
+{
+	return be32_to_cpu(req_msg->offset40) & 0x1;
+}
+
+static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
+					u8 flow_ctrl)
+{
+	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
+					 (be32_to_cpu(req_msg->offset40) &
+					  0xFFFFFFFE));
+}
+
+/*
+ * Starting PSN: top 24 bits of offset44.  Note the asymmetry: the getter
+ * returns a host-order value, while the setter takes a network-order PSN
+ * per the file-top convention.
+ */
+static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
+{
+	return be32_to_cpu(req_msg->offset44) >> 8;
+}
+
+static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
+					   u32 starting_psn)
+{
+	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
+			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
+}
+
+/* Local CM response timeout: bits 3-7 of the low byte of offset44. */
+static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
+{
+	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
+}
+
+static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
+						 u8 resp_timeout)
+{
+	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
+			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
+}
+
+/* Retry count: bits 0-2 of offset44. */
+static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
+{
+	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
+}
+
+static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
+					  u8 retry_count)
+{
+	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
+			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
+}
+
+/* Path MTU: top 4 bits of offset50. */
+static inline u8 cm_req_get_mtu(struct cm_req_msg *req_msg)
+{
+	return req_msg->offset50 >> 4;
+}
+
+static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
+{
+	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
+}
+
+/* RNR retry count: bits 0-2 of offset50. */
+static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
+{
+	return req_msg->offset50 & 0x7;
+}
+
+static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
+					      u8 rnr_retry_count)
+{
+	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
+				  (rnr_retry_count & 0x7));
+}
+
+/* Max CM retries: top 4 bits of offset51. */
+static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
+{
+	return req_msg->offset51 >> 4;
+}
+
+static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
+					     u8 retries)
+{
+	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
+}
+
+/* SRQ flag: bit 3 of offset51. */
+static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
+{
+	return (req_msg->offset51 & 0x8) >> 3;
+}
+
+static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
+{
+	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
+				  ((srq & 0x1) << 3));
+}
+
+/*
+ * Primary path accessors: flow label (20 bits, network byte order) and
+ * packet rate share primary_offset88; SL and subnet-local share
+ * primary_offset94; local ACK timeout lives in primary_offset95.
+ */
+static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
+{
+	return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
+}
+
+static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
+						 u32 flow_label)
+{
+	req_msg->primary_offset88 = cpu_to_be32(
+				    (be32_to_cpu(req_msg->primary_offset88) &
+				     0x00000FFF) |
+				     (be32_to_cpu(flow_label) << 12));
+}
+
+static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
+{
+	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
+}
+
+static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
+						  u8 rate)
+{
+	req_msg->primary_offset88 = cpu_to_be32(
+				    (be32_to_cpu(req_msg->primary_offset88) &
+				     0xFFFFFFC0) | (rate & 0x3F));
+}
+
+static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
+{
+	return (u8) (req_msg->primary_offset94 >> 4);
+}
+
+static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
+{
+	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
+					  (sl << 4));
+}
+
+static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
+{
+	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
+}
+
+static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
+						   u8 subnet_local)
+{
+	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
+					  ((subnet_local & 0x1) << 3));
+}
+
+static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
+{
+	return (u8) (req_msg->primary_offset95 >> 3);
+}
+
+static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
+							u8 local_ack_timeout)
+{
+	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
+					  (local_ack_timeout << 3));
+}
+
+/*
+ * Alternate path accessors — identical bit layouts to the primary path
+ * accessors above, operating on alt_offset132/138/139.
+ */
+static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
+{
+	return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
+}
+
+static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
+					     u32 flow_label)
+{
+	req_msg->alt_offset132 = cpu_to_be32(
+				 (be32_to_cpu(req_msg->alt_offset132) &
+				  0x00000FFF) |
+				  (be32_to_cpu(flow_label) << 12));
+}
+
+static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
+{
+	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
+}
+
+static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
+					      u8 rate)
+{
+	req_msg->alt_offset132 = cpu_to_be32(
+				 (be32_to_cpu(req_msg->alt_offset132) &
+				  0xFFFFFFC0) | (rate & 0x3F));
+}
+
+static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
+{
+	return (u8) (req_msg->alt_offset138 >> 4);
+}
+
+static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
+{
+	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
+				       (sl << 4));
+}
+
+static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
+{
+	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
+}
+
+static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
+					       u8 subnet_local)
+{
+	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
+				       ((subnet_local & 0x1) << 3));
+}
+
+static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
+{
+	return (u8) (req_msg->alt_offset139 >> 3);
+}
+
+static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
+						    u8 local_ack_timeout)
+{
+	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
+				       (local_ack_timeout << 3));
+}
+
+/*
+ * Which outstanding message a REJ or MRA refers to; stored in the 2-bit
+ * "message rejected/MRAed" field of those messages (see the accessors
+ * cm_rej_set_msg_rejected / cm_mra_set_msg_mraed below).
+ */
+enum cm_msg_response {
+	CM_MSG_RESPONSE_REQ = 0x0,
+	CM_MSG_RESPONSE_REP = 0x1,
+	CM_MSG_RESPONSE_OTHER = 0x2
+};
+
+/*
+ * MRA message wire format (attribute 0x0011).  offset8/offset9 pack the
+ * bit-fields noted inline; use the accessors that follow.
+ *
+ * Fix: dropped the stray leading space before "struct".
+ */
+struct cm_mra_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+	/* message MRAed:2, rsvd:6 */
+	u8 offset8;
+	/* service timeout:5, rsvd:3 */
+	u8 offset9;
+
+	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* Message MRAed (enum cm_msg_response): top 2 bits of offset8. */
+static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
+{
+	return (u8) (mra_msg->offset8 >> 6);
+}
+
+static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
+{
+	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
+}
+
+/* Service timeout: top 5 bits of offset9. */
+static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
+{
+	return (u8) (mra_msg->offset9 >> 3);
+}
+
+static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
+					      u8 service_timeout)
+{
+	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
+				 (service_timeout << 3));
+}
+
+/* REJ message wire format (attribute 0x0012). */
+struct cm_rej_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+	/* message REJected:2, rsvd:6 */
+	u8 offset8;
+	/* reject info length:7, rsvd:1. */
+	u8 offset9;
+	u16 reason;
+	u8 ari[IB_CM_REJ_ARI_LENGTH];
+
+	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* Message REJected (enum cm_msg_response): top 2 bits of offset8. */
+static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
+{
+	return (u8) (rej_msg->offset8 >> 6);
+}
+
+static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
+{
+	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
+}
+
+/* Reject info (ARI) length: top 7 bits of offset9. */
+static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
+{
+	return (u8) (rej_msg->offset9 >> 1);
+}
+
+static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
+					      u8 len)
+{
+	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
+}
+
+/* REP message wire format (attribute 0x0013). */
+struct cm_rep_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+	u32 local_qkey;
+	/* local QPN:24, rsvd:8 */
+	u32 offset12;
+	/* local EECN:24, rsvd:8 */
+	u32 offset16;
+	/* starting PSN:24 rsvd:8 */
+	u32 offset20;
+	u8 resp_resources;
+	u8 initiator_depth;
+	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
+	u8 offset26;
+	/* RNR retry count:3, SRQ:1, rsvd:5 */
+	u8 offset27;
+	u64 local_ca_guid;
+
+	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* Local QPN: top 24 bits of offset12; passed/returned in network order. */
+static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
+}
+
+static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
+{
+	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
+			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
+}
+
+/* Starting PSN: top 24 bits of offset20; passed/returned in network order. */
+static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
+}
+
+static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
+					   u32 starting_psn)
+{
+	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
+			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
+}
+
+/* Target ACK delay: top 5 bits of offset26. */
+static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
+{
+	return (u8) (rep_msg->offset26 >> 3);
+}
+
+static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
+					       u8 target_ack_delay)
+{
+	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
+				  (target_ack_delay << 3));
+}
+
+/* Failover accepted: bits 1-2 of offset26. */
+static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
+{
+	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
+}
+
+static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
+{
+	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
+				  ((failover & 0x3) << 1));
+}
+
+/* End-to-end flow control: bit 0 of offset26. */
+static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
+{
+	return (u8) (rep_msg->offset26 & 0x01);
+}
+
+static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
+					    u8 flow_ctrl)
+{
+	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
+				  (flow_ctrl & 0x1));
+}
+
+/* RNR retry count: top 3 bits of offset27. */
+static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
+{
+	return (u8) (rep_msg->offset27 >> 5);
+}
+
+static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
+					      u8 rnr_retry_count)
+{
+	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
+				  (rnr_retry_count << 5));
+}
+
+/* SRQ flag: bit 4 of offset27. */
+static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
+{
+	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
+}
+
+static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
+{
+	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
+				  ((srq & 0x1) << 4));
+}
+
+/* RTU message wire format (attribute 0x0014); no packed bit-fields. */
+struct cm_rtu_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+
+	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* DREQ message wire format (attribute 0x0015). */
+struct cm_dreq_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+	/* remote QPN/EECN:24, rsvd:8 */
+	u32 offset8;
+
+	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* Remote QPN/EECN: top 24 bits of offset8; network byte order. */
+static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
+{
+	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
+}
+
+static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
+{
+	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
+			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
+}
+
+/* DREP message wire format (attribute 0x0016); no packed bit-fields. */
+struct cm_drep_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 local_comm_id;
+	u32 remote_comm_id;
+
+	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
+
+} __attribute__ ((packed));
+
+/* LAP/APR (path migration) formats are not yet implemented. */
+/*
+struct cm_lap_msg {
+} __attribute__  ((packed));
+
+struct cm_apr_msg {
+} __attribute__ ((packed));
+*/
+
+/* SIDR REQ message wire format (attribute 0x0017). */
+struct cm_sidr_req_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 request_id;
+	u16 pkey;
+	u16 rsvd;
+	u64 service_id;
+
+	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+} __attribute__ ((packed));
+
+/* SIDR REP message wire format (attribute 0x0018). */
+struct cm_sidr_rep_msg {
+	struct ib_mad_hdr hdr;
+
+	u32 request_id;
+	u8 status;
+	u8 info_length;
+	u16 rsvd;
+	/* QPN:24, rsvd:8 */
+	u32 offset8;
+	u64 service_id;
+	u32 qkey;
+	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
+
+	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
+} __attribute__ ((packed));
+
+/* QPN: top 24 bits of offset8; passed/returned in network byte order. */
+static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
+}
+
+static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
+				       u32 qpn)
+{
+	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
+					(be32_to_cpu(sidr_rep_msg->offset8) &
+					 0x000000FF));
+}
+
+#endif /* CM_MSGS_H */



More information about the general mailing list