[openib-general] [PATCH 3 of 3] mthca: static rate encoding change support

Jack Morgenstein jackm at mellanox.co.il
Thu Mar 9 07:01:15 PST 2006


mthca provider changes to support the new verbs-layer static rate encoding.
The change enables proper static rate handling for DDR HCAs.

Signed-off-by: Jack Morgenstein <jackm at mellanox.co.il>
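
For reviewers unfamiliar with the two encodings, here is a small standalone
sketch (not part of the patch) of the mapping logic that mthca_av.c implements
below: memfree HCAs take a relative code derived from the inter-packet delay
(ratio of current link rate to requested rate), while Tavor takes one of a few
absolute rate codes. The sketch assumes rates are expressed as multiples of
2.5 Gb/s, the same units ib_rate_to_mult() returns; the enum values mirror the
MTHCA_RATE_* constants added to mthca_dev.h, and the function names here
(memfree_rate, tavor_rate) are illustration only.

/*
 * Standalone user-space sketch of the two static rate encodings.
 * Assumption: rates are multiples of 2.5 Gb/s, as with ib_rate_to_mult().
 * Build with: cc -o rates rates.c
 */
#include <stdio.h>

/* Values mirror the MTHCA_RATE_* enums added to mthca_dev.h below. */
enum { MEMFREE_FULL = 0, MEMFREE_QUARTER = 1,
       MEMFREE_EIGHTH = 2, MEMFREE_HALF = 3 };
enum { TAVOR_FULL = 0, TAVOR_1X = 1, TAVOR_4X = 2, TAVOR_1X_DDR = 3 };

/* Memfree: encode the requested rate relative to the current link rate
 * via the inter-packet delay (IB spec vol. 1, 9.11.1). */
static unsigned memfree_rate(unsigned req_mult, unsigned link_mult)
{
	unsigned ipd;

	if (link_mult <= req_mult)
		return MEMFREE_FULL;	/* no throttling needed */

	ipd = (link_mult - 1) / req_mult;
	switch (ipd) {
	case 0:	 return MEMFREE_FULL;
	case 1:	 return MEMFREE_HALF;
	case 2:
	case 3:	 return MEMFREE_QUARTER;
	default: return MEMFREE_EIGHTH;
	}
}

/* Tavor with full rate support: absolute codes keyed on the request. */
static unsigned tavor_rate(unsigned req_mult)
{
	switch (req_mult) {
	case 1:	 return TAVOR_1X;	/* 2.5 Gb/s */
	case 2:	 return TAVOR_1X_DDR;	/* 5 Gb/s */
	case 4:	 return TAVOR_4X;	/* 10 Gb/s */
	default: return TAVOR_FULL;
	}
}

int main(void)
{
	/* 10 Gb/s requested on a 4X DDR (20 Gb/s = 8 * 2.5 Gb/s) link:
	 * memfree ipd = (8 - 1) / 4 = 1 -> half rate. */
	printf("memfree: %u, tavor: %u\n", memfree_rate(4, 8), tavor_rate(4));
	return 0;
}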

Index: src/drivers/infiniband/hw/mthca/mthca_cmd.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_cmd.c	2006-03-07 10:12:42.650684000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_cmd.c	2006-03-07 10:12:47.235576000 +0200
@@ -995,6 +995,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev
 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
+#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET   0x3c
 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
@@ -1086,6 +1087,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev
 	dev_lim->num_ports = field & 0xf;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
 	dev_lim->max_gids = 1 << (field & 0xf);
+	MTHCA_GET(size, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
+	dev_lim->stat_rate_support = size;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
 	dev_lim->max_pkeys = 1 << (field & 0xf);
 	MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
Index: src/drivers/infiniband/hw/mthca/mthca_cmd.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_cmd.h	2006-03-07 10:12:42.663686000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_cmd.h	2006-03-07 10:12:47.244578000 +0200
@@ -146,6 +146,7 @@ struct mthca_dev_lim {
 	int max_vl;
 	int num_ports;
 	int max_gids;
+	u16 stat_rate_support;
 	int max_pkeys;
 	u32 flags;
 	int reserved_uars;
Index: src/drivers/infiniband/hw/mthca/mthca_dev.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_dev.h	2006-03-07 10:12:42.756685000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_dev.h	2006-03-08 16:02:17.877655000 +0200
@@ -127,6 +127,24 @@ enum {
 	MTHCA_CMD_NUM_DBELL_DWORDS = 8
 };
 
+enum {
+	MTHCA_RATE_SUPP_TAVOR_ALL = 0xF
+};
+
+enum {
+	MTHCA_RATE_TAVOR_FULL   = 0, /* 4X SDR / DDR depending on HCA and link */
+	MTHCA_RATE_TAVOR_1X     = 1,
+	MTHCA_RATE_TAVOR_4X     = 2,
+	MTHCA_RATE_TAVOR_1X_DDR = 3
+};
+
+enum {
+	MTHCA_RATE_MEMFREE_FULL    = 0, /* 4X SDR / DDR depending on HCA and link */
+	MTHCA_RATE_MEMFREE_QUARTER = 1,
+	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
+	MTHCA_RATE_MEMFREE_HALF    = 3
+};
+
 struct mthca_cmd {
 	struct pci_pool          *pool;
 	struct mutex              hcr_mutex;
@@ -179,6 +197,7 @@ struct mthca_limits {
 	int      reserved_pds;
 	u32      page_size_cap;
 	u32      flags;
+	u16      stat_rate_support;
 	u8       port_width_cap;
 };
 
@@ -360,6 +379,7 @@ struct mthca_dev {
 	struct ib_mad_agent  *send_agent[MTHCA_MAX_PORTS][2];
 	struct ib_ah         *sm_ah[MTHCA_MAX_PORTS];
 	spinlock_t            sm_lock;
+	u8                    rate[MTHCA_MAX_PORTS];
 };
 
 #define mthca_dbg(mdev, format, arg...) \
@@ -560,6 +580,8 @@ int mthca_process_mad(struct ib_device *
 		      struct ib_grh *in_grh,
 		      struct ib_mad *in_mad,
 		      struct ib_mad *out_mad);
+int mthca_update_rate(struct mthca_dev *dev, u8 port_num);
+u8 mthca_get_rate(struct mthca_dev *dev, struct ib_ah_attr *attr, u8 port);
 int mthca_create_agents(struct mthca_dev *dev);
 void mthca_free_agents(struct mthca_dev *dev);
 
Index: src/drivers/infiniband/hw/mthca/mthca_main.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_main.c	2006-03-07 10:12:42.849684000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_main.c	2006-03-09 16:29:51.153809000 +0200
@@ -191,6 +191,7 @@ static int __devinit mthca_dev_lim(struc
 	mdev->limits.port_width_cap     = dev_lim->max_port_width;
 	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
 	mdev->limits.flags              = dev_lim->flags;
+	mdev->limits.stat_rate_support  = dev_lim->stat_rate_support;
 
 	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
 	   May be doable since hardware supports it for SRQ.
@@ -957,6 +958,7 @@ static int __devinit mthca_init_one(stru
 	int ddr_hidden = 0;
 	int err;
 	struct mthca_dev *mdev;
+	int i;
 
 	if (!mthca_version_printed) {
 		printk(KERN_INFO "%s", mthca_version);
@@ -1095,8 +1097,19 @@ static int __devinit mthca_init_one(stru
 
 	pci_set_drvdata(pdev, mdev);
 
+	if (mthca_is_memfree(mdev))
+		for (i = 1; i <= mdev->limits.num_ports; ++i)
+			if (mthca_update_rate(mdev, i)) {
+				mthca_err(mdev, "Failed to obtain port %d rate."
+					  " aborting.\n", i);
+				goto err_free_agents;
+			}
+
 	return 0;
 
+err_free_agents:
+	mthca_free_agents(mdev);
+
 err_unregister:
 	mthca_unregister_device(mdev);
 
Index: src/drivers/infiniband/hw/mthca/mthca_mad.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_mad.c	2006-03-07 10:12:42.940685000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_mad.c	2006-03-08 16:56:43.545317000 +0200
@@ -46,6 +46,26 @@ enum {
 	MTHCA_VENDOR_CLASS2 = 0xa
 };
 
+int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
+{
+	struct ib_port_attr       *tprops = NULL;
+	int                        ret;
+
+	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+	if (!tprops)
+		return -ENOMEM;
+
+	ret = ib_query_port(&dev->ib_dev, port_num, tprops);
+	if (ret)
+		printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
+		       ret, dev->ib_dev.name, port_num);
+	else
+		dev->rate[port_num - 1] = tprops->active_speed *
+					  ib_width_enum_to_int(tprops->active_width);
+	kfree(tprops);
+	return ret;
+}
+
 static void update_sm_ah(struct mthca_dev *dev,
 			 u8 port_num, u16 lid, u8 sl)
 {
@@ -87,6 +107,8 @@ static void smp_snoop(struct ib_device *
 	     mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+			if (mthca_is_memfree(to_mdev(ibdev)))
+				mthca_update_rate(to_mdev(ibdev), port_num);
 			update_sm_ah(to_mdev(ibdev), port_num,
 				     be16_to_cpup((__be16 *) (mad->data + 58)),
 				     (*(u8 *) (mad->data + 76)) & 0xf);
Index: src/drivers/infiniband/hw/mthca/mthca_av.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_av.c	2006-03-07 10:12:43.116687000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_av.c	2006-03-09 11:55:35.417221000 +0200
@@ -53,6 +53,52 @@ struct mthca_av {
 	__be32 dgid[4];
 };
 
+static inline u8 ib_rate_to_memfree(struct mthca_dev *dev, u8 req_rate,
+				    u8 curr_rate)
+{
+	u8 ipd; /* Inter Packet Delay.  See IB Spec Vol 1, 9.11.1 */
+
+	if (curr_rate <= req_rate)
+		return 0;
+
+	ipd = (curr_rate - 1) / req_rate;
+	switch (ipd) {
+	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
+	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
+	case 2:	 /* fall through */
+	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
+	default: return MTHCA_RATE_MEMFREE_EIGHTH;
+	}
+}
+
+u8 mthca_get_rate(struct mthca_dev *dev, struct ib_ah_attr *attr, u8 port)
+{
+	if (!attr->static_rate)
+		return 0;
+
+	if (mthca_is_memfree(dev))
+		return ib_rate_to_memfree(dev,
+					  ib_rate_to_mult(attr->static_rate),
+					  dev->rate[port - 1]);
+
+	if ((dev->limits.stat_rate_support & MTHCA_RATE_SUPP_TAVOR_ALL) ==
+	    MTHCA_RATE_SUPP_TAVOR_ALL)
+		/* full Tavor absolute rates */
+		switch (attr->static_rate) {
+		case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
+		case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X_DDR;
+		case IB_RATE_10_GBPS:  return MTHCA_RATE_TAVOR_4X;
+		default:	       return MTHCA_RATE_TAVOR_FULL;
+		}
+	else
+		/* old (partial) Tavor absolute rates */
+		switch (attr->static_rate) {
+		case IB_RATE_2_5_GBPS: /* fall through */
+		case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X;
+		default:	       return MTHCA_RATE_TAVOR_FULL;
+		}
+}
+
 int mthca_create_ah(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct ib_ah_attr *ah_attr,
@@ -105,7 +151,7 @@ on_hca_fail:
 	av->g_slid  = ah_attr->src_path_bits;
 	av->dlid    = cpu_to_be16(ah_attr->dlid);
 	av->msg_sr  = (3 << 4) | /* 2K message */
-		ah_attr->static_rate;
+		mthca_get_rate(dev, ah_attr, ah_attr->port_num);
 	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		av->g_slid |= 0x80;
Index: src/drivers/infiniband/hw/mthca/mthca_qp.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_qp.c	2006-03-07 10:12:43.028686000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_qp.c	2006-03-09 12:41:07.783783000 +0200
@@ -246,6 +246,9 @@ void mthca_qp_event(struct mthca_dev *de
 		return;
 	}
 
+	if (event_type == IB_EVENT_PATH_MIG)
+		qp->port = qp->alt_port;
+
 	event.device      = &dev->ib_dev;
 	event.event       = event_type;
 	event.element.qp  = &qp->ibqp;
@@ -476,11 +479,12 @@ out:
 	return err;
 }
 
-static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+static void mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+			   struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid        = cpu_to_be16(ah->dlid);
-	path->static_rate = !!ah->static_rate;
+	path->static_rate = mthca_get_rate(dev, ah, port);
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		path->g_mylmc   |= 1 << 7;
@@ -612,7 +616,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 
 	if (qp->transport == MLX)
 		qp_context->pri_path.port_pkey |=
-			cpu_to_be32(to_msqp(qp)->port << 24);
+			cpu_to_be32(qp->port << 24);
 	else {
 		if (attr_mask & IB_QP_PORT) {
 			qp_context->pri_path.port_pkey |=
@@ -635,7 +639,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
+		mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+			       attr_mask & IB_QP_PORT ? attr->port_num : qp->port);
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
 	}
 
@@ -657,7 +662,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 			return -EINVAL;
 		}
 
-		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+		mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+			       attr->alt_ah_attr.port_num);
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
 							      attr->alt_port_num << 24);
 		qp_context->alt_path.ackto = attr->alt_timeout << 3;
@@ -763,6 +769,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 			qp->atomic_rd_en = attr->qp_access_flags;
 		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 			qp->resp_depth = attr->max_dest_rd_atomic;
+		if (attr_mask & IB_QP_PORT)
+			qp->port = attr->port_num;
+		if (attr_mask & IB_QP_ALT_PATH)
+			qp->alt_port = attr->alt_port_num;
 	}
 
 	mthca_free_mailbox(dev, mailbox);
@@ -777,13 +787,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, 
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR &&
 		    new_state == IB_QPS_RTR)
-			init_port(dev, to_msqp(qp)->port);
+			init_port(dev, qp->port);
 
 		if (cur_state != IB_QPS_RESET &&
 		    cur_state != IB_QPS_ERR &&
 		    (new_state == IB_QPS_RESET ||
 		     new_state == IB_QPS_ERR))
-			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+			mthca_CLOSE_IB(dev, qp->port, &status);
 	}
 
 	/*
@@ -1185,6 +1195,9 @@ int mthca_alloc_qp(struct mthca_dev *dev
 	if (qp->qpn == -1)
 		return -ENOMEM;
 
+	/* initialize port to zero for error-catching. */
+	qp->port = 0;
+
 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
 				    send_policy, qp);
 	if (err) {
@@ -1233,7 +1246,7 @@ int mthca_alloc_sqp(struct mthca_dev *de
 	if (err)
 		goto err_out;
 
-	sqp->port = port;
+	sqp->qp.port = port;
 	sqp->qp.qpn       = mqpn;
 	sqp->qp.transport = MLX;
 
@@ -1376,10 +1389,10 @@ static int build_mlx_header(struct mthca
 		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   sqp->pkey_index, &pkey);
 	else
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   wr->wr.ud.pkey_index, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
Index: src/drivers/infiniband/hw/mthca/mthca_provider.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_provider.h	2006-03-07 10:12:43.200684000 +0200
+++ src/drivers/infiniband/hw/mthca/mthca_provider.h	2006-03-08 09:14:30.509852000 +0200
@@ -257,6 +257,8 @@ struct mthca_qp {
 	atomic_t               refcount;
 	u32                    qpn;
 	int                    is_direct;
+	u16                    port; /* for SQP and memfree use only */
+	u16                    alt_port; /* for memfree use only */
 	u8                     transport;
 	u8                     state;
 	u8                     atomic_rd_en;
@@ -278,7 +280,6 @@ struct mthca_qp {
 
 struct mthca_sqp {
 	struct mthca_qp qp;
-	int             port;
 	int             pkey_index;
 	u32             qkey;
 	u32             send_psn;


