[openib-general] Re: static rate encoding change support
Jack Morgenstein
jackm at mellanox.co.il
Mon Apr 3 02:41:54 PDT 2006
On Sunday 02 April 2006 23:14, Roland Dreier wrote:
> Michael> Hmm, seems like a waste of memory ... recoding back from
> Michael> hardware seems cheaper - its global, not per QP. No?
>
> One byte per QP doesn't seem like much of a waste. We can probably
> lay out the QP struct so it's free in fact.
>
> - R.
In memfree, static rate changes are tracked (as in the original patch), while in
Tavor they are not -- that was the reason for performing the update inside
query-qp.
The easiest thing to do is to track static rate changes for both
Tavor and memfree (a modification of the original large patch). Then you can
get rid of the port static rate update in query-qp.
I've indicated the required changes below in a new combined patch (a replacement
for the patch you proposed). The logic is simpler than before: there is no memfree
check for updating the port static rate, and no update at all in query-qp.
The merged mthca patch below tracks static rate changes in Tavor as well,
includes the static-rate changes for query-qp, and gets rid of the update_rate
call in query-qp. Note that there is no need to store the static rates
now: all static rate changes are tracked, so we can use the stored port
static rate for the path and alt-path static rate computation.
One dividend of this is that query-qp returns the actual static rate in
operation, rather than zero, if you created the path with the default static
rate.
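To make the arithmetic concrete, here is a minimal standalone sketch (plain
userspace C, not driver code; the RATE_* names are local to the sketch and
mirror the MTHCA_RATE_MEMFREE_* values in the patch) of how the stored port
rate and the requested path rate combine into the memfree IPD-style encoding:

#include <stdio.h>

/* Rates are multiples of 2.5 Gb/s (1X); encodings mirror MTHCA_RATE_MEMFREE_*. */
enum { RATE_FULL = 0, RATE_QUARTER = 1, RATE_EIGHTH = 2, RATE_HALF = 3 };

static int rate_to_memfree(int req_rate, int curr_rate)
{
	int ipd;		/* Inter Packet Delay, IB Spec Vol 1, 9.11.1 */

	if (curr_rate <= req_rate)
		return RATE_FULL;

	ipd = (curr_rate - 1) / req_rate;
	switch (ipd) {
	case 0:  return RATE_FULL;
	case 1:  return RATE_HALF;
	case 2:			/* fall through */
	case 3:  return RATE_QUARTER;
	default: return RATE_EIGHTH;
	}
}

int main(void)
{
	/* port running 4X DDR (mult 8), path limited to 1X SDR (mult 1) */
	printf("encoding = %d\n", rate_to_memfree(1, 8));	/* prints 2 (eighth) */
	return 0;
}

Because the port rate is refreshed whenever a PortInfo Set MAD is snooped
(mthca_update_rate below), the same computation stays correct after a link
rate change, which is what makes the query-qp update unnecessary.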
Sorry I did not see your correspondence/questions last night.
I added your header comments to the patch below, replacing mine.
I didn't add your name to the Signed-off-by list, although I think it belongs
there.
- Jack
--------------------------------
IB: simplify static rate encoding
Push translation of static rate to HCA format into low-level drivers,
where it belongs. For static rate encoding, use the encoding of the rate
field from the IB standard PathRecord, with the addition of value 0 for
backwards compatibility with current usage. The changes are:
- Add enum ib_rate to midlayer includes.
- Get rid of static rate translation in IPoIB; just use static rate
directly from Path and MulticastGroup records.
- Update mthca driver to translate absolute static rate into the
format used by hardware.
Signed-off-by: Jack Morgenstein <jackm at mellanox.co.il>
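To illustrate the new convention on the consumer side, here is a small
standalone sketch (stub types only, not the kernel ib_verbs.h/ib_sa.h
structures): with the PathRecord encoding used directly, a ULP simply copies
the SA-provided rate (e.g. 3 for 10 Gb/s) into static_rate and leaves any
hardware-specific translation to the low-level driver.

#include <stdint.h>
#include <stdio.h>

struct path_rec { uint16_t dlid; uint8_t sl; uint8_t rate; };		/* stub */
struct ah_attr  { uint16_t dlid; uint8_t sl;
		  uint8_t port_num; uint8_t static_rate; };		/* stub */

static struct ah_attr make_av(const struct path_rec *pathrec, uint8_t port)
{
	struct ah_attr av = {
		.dlid        = pathrec->dlid,
		.sl          = pathrec->sl,
		.port_num    = port,
		.static_rate = pathrec->rate,	/* IB_RATE_* value, passed through */
	};
	return av;
}

int main(void)
{
	struct path_rec rec = { .dlid = 0x12, .sl = 0, .rate = 3 };	/* 3 == 10 Gb/s */
	struct ah_attr av = make_av(&rec, 1);

	printf("static_rate = %u\n", av.static_rate);
	return 0;
}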
Index: src/drivers/infiniband/include/rdma/ib_verbs.h
===================================================================
--- src.orig/drivers/infiniband/include/rdma/ib_verbs.h 2006-04-02
09:41:54.478573000 +0300
+++ src/drivers/infiniband/include/rdma/ib_verbs.h 2006-04-03
11:23:44.078150000 +0300
@@ -337,6 +337,52 @@ enum ib_ah_flags {
IB_AH_GRH = 1
};
+enum ib_rate {
+ IB_RATE_PORT_CURRENT = 0,
+ IB_RATE_2_5_GBPS = 2,
+ IB_RATE_5_GBPS = 5,
+ IB_RATE_10_GBPS = 3,
+ IB_RATE_20_GBPS = 6,
+ IB_RATE_30_GBPS = 4,
+ IB_RATE_40_GBPS = 7,
+ IB_RATE_60_GBPS = 8,
+ IB_RATE_80_GBPS = 9,
+ IB_RATE_120_GBPS = 10
+};
+
+static inline int ib_rate_to_mult(enum ib_rate rate)
+{
+ switch (rate) {
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ default: return -1;
+ }
+}
+
+static inline enum ib_rate mult_to_ib_rate(u8 mult)
+{
+ switch (mult) {
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
+ }
+}
+
+
struct ib_ah_attr {
struct ib_global_route grh;
u16 dlid;
Index: src/drivers/infiniband/include/rdma/ib_sa.h
===================================================================
--- src.orig/drivers/infiniband/include/rdma/ib_sa.h 2006-04-02
09:41:54.595569000 +0300
+++ src/drivers/infiniband/include/rdma/ib_sa.h 2006-04-02 11:47:26.174973000 +0300
@@ -91,34 +91,6 @@ enum ib_sa_selector {
IB_SA_BEST = 3
};
-enum ib_sa_rate {
- IB_SA_RATE_2_5_GBPS = 2,
- IB_SA_RATE_5_GBPS = 5,
- IB_SA_RATE_10_GBPS = 3,
- IB_SA_RATE_20_GBPS = 6,
- IB_SA_RATE_30_GBPS = 4,
- IB_SA_RATE_40_GBPS = 7,
- IB_SA_RATE_60_GBPS = 8,
- IB_SA_RATE_80_GBPS = 9,
- IB_SA_RATE_120_GBPS = 10
-};
-
-static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
-{
- switch (rate) {
- case IB_SA_RATE_2_5_GBPS: return 1;
- case IB_SA_RATE_5_GBPS: return 2;
- case IB_SA_RATE_10_GBPS: return 4;
- case IB_SA_RATE_20_GBPS: return 8;
- case IB_SA_RATE_30_GBPS: return 12;
- case IB_SA_RATE_40_GBPS: return 16;
- case IB_SA_RATE_60_GBPS: return 24;
- case IB_SA_RATE_80_GBPS: return 32;
- case IB_SA_RATE_120_GBPS: return 48;
- default: return -1;
- }
-}
-
/*
* Structures for SA records are named "struct ib_sa_xxx_rec." No
* attempt is made to pack structures to match the physical layout of
Index: src/drivers/infiniband/ulp/ipoib/ipoib_fs.c
===================================================================
--- src.orig/drivers/infiniband/ulp/ipoib/ipoib_fs.c 2006-04-03
11:16:33.615619000 +0300
+++ src/drivers/infiniband/ulp/ipoib/ipoib_fs.c 2006-04-03 11:22:19.792587000 +0300
@@ -213,7 +213,7 @@ static int ipoib_path_seq_show(struct se
gid_buf, path.pathrec.dlid ? "yes" : "no");
if (path.pathrec.dlid) {
- rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25;
+ rate = ib_rate_to_mult(path.pathrec.rate) * 25;
seq_printf(file,
" DLID: 0x%04x\n"
Index: src/drivers/infiniband/ulp/ipoib/ipoib_main.c
===================================================================
--- src.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c 2006-04-03
11:16:33.427621000 +0300
+++ src/drivers/infiniband/ulp/ipoib/ipoib_main.c 2006-04-03
11:22:19.773588000 +0300
@@ -378,16 +378,9 @@ static void path_rec_completion(int stat
struct ib_ah_attr av = {
.dlid = be16_to_cpu(pathrec->dlid),
.sl = pathrec->sl,
- .port_num = priv->port
+ .port_num = priv->port,
+ .static_rate = pathrec->rate
};
- int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);
-
- if (path_rate > 0 && priv->local_rate > path_rate)
- av.static_rate = (priv->local_rate - 1) / path_rate;
-
- ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
- av.static_rate, priv->local_rate,
- ib_sa_rate_enum_to_int(pathrec->rate));
ah = ipoib_create_ah(dev, priv->pd, &av);
}
Index: src/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
===================================================================
--- src.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-04-03
11:16:33.522619000 +0300
+++ src/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2006-04-03
11:22:19.784588000 +0300
@@ -250,6 +250,7 @@ static int ipoib_mcast_join_finish(struc
.port_num = priv->port,
.sl = mcast->mcmember.sl,
.ah_flags = IB_AH_GRH,
+ .static_rate = mcast->mcmember.rate,
.grh = {
.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
.hop_limit = mcast->mcmember.hop_limit,
@@ -257,17 +258,8 @@ static int ipoib_mcast_join_finish(struc
.traffic_class = mcast->mcmember.traffic_class
}
};
- int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);
-
av.grh.dgid = mcast->mcmember.mgid;
- if (path_rate > 0 && priv->local_rate > path_rate)
- av.static_rate = (priv->local_rate - 1) / path_rate;
-
- ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
- av.static_rate, priv->local_rate,
- ib_sa_rate_enum_to_int(mcast->mcmember.rate));
-
ah = ipoib_create_ah(dev, priv->pd, &av);
if (!ah) {
ipoib_warn(priv, "ib_address_create failed\n");
Index: src/drivers/infiniband/hw/mthca/mthca_av.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_av.c 2006-04-03
11:16:21.388995000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_av.c 2006-04-03 11:23:43.843152000 +0300
@@ -53,6 +53,83 @@ struct mthca_av {
__be32 dgid[4];
};
+static inline u8 ib_rate_to_memfree(struct mthca_dev *dev, u8 req_rate,
+ u8 curr_rate)
+{
+ u8 ipd; /* Inter Packet Delay. See IB Spec Vol 1, 9.11.1 */
+
+ if (curr_rate <= req_rate)
+ return 0;
+
+ ipd = (curr_rate - 1) / req_rate;
+ switch (ipd) {
+ case 0: return MTHCA_RATE_MEMFREE_FULL;
+ case 1: return MTHCA_RATE_MEMFREE_HALF;
+ case 2: /* fall through */
+ case 3: return MTHCA_RATE_MEMFREE_QUARTER;
+ default: return MTHCA_RATE_MEMFREE_EIGHTH;
+ }
+}
+
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+{
+ if (!mthca_rate)
+ return mult_to_ib_rate(dev->rate[port - 1]);
+
+ if (mthca_is_memfree(dev)) {
+ switch (mthca_rate) {
+ case MTHCA_RATE_MEMFREE_EIGHTH:
+ return mult_to_ib_rate(dev->rate[port - 1] / 8);
+ case MTHCA_RATE_MEMFREE_QUARTER:
+ return mult_to_ib_rate(dev->rate[port - 1] / 4);
+ case MTHCA_RATE_MEMFREE_HALF:
+ return mult_to_ib_rate(dev->rate[port - 1] / 2);
+ case MTHCA_RATE_MEMFREE_FULL:
+ default:
+ return mult_to_ib_rate(dev->rate[port - 1]);
+ }
+ }
+
+ switch (mthca_rate) {
+ case MTHCA_RATE_TAVOR_1X:
+ return IB_RATE_2_5_GBPS;
+ case MTHCA_RATE_TAVOR_1X_DDR:
+ return IB_RATE_5_GBPS;
+ case MTHCA_RATE_TAVOR_4X:
+ return IB_RATE_10_GBPS;
+ default:
+ return mult_to_ib_rate(dev->rate[port - 1]);
+ }
+}
+
+u8 mthca_get_rate(struct mthca_dev *dev, struct ib_ah_attr *attr, u8 port)
+{
+ if (!attr->static_rate)
+ return 0;
+
+ if (mthca_is_memfree(dev))
+ return ib_rate_to_memfree(dev,
+ ib_rate_to_mult(attr->static_rate),
+ dev->rate[port - 1]);
+
+ if ((dev->limits.stat_rate_support & MTHCA_RATE_SUPP_TAVOR_ALL) ==
+ MTHCA_RATE_SUPP_TAVOR_ALL)
+ /* full Tavor absolute rates*/
+ switch (attr->static_rate) {
+ case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
+ case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR;
+ case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X;
+ default: return MTHCA_RATE_TAVOR_FULL;
+ }
+ else
+ /* old (partial) Tavor absolute rates */
+ switch (attr->static_rate) {
+ case IB_RATE_2_5_GBPS: /* fall through */
+ case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X;
+ default: return MTHCA_RATE_TAVOR_FULL;
+ }
+}
+
int mthca_create_ah(struct mthca_dev *dev,
struct mthca_pd *pd,
struct ib_ah_attr *ah_attr,
@@ -105,7 +182,7 @@ on_hca_fail:
av->g_slid = ah_attr->src_path_bits;
av->dlid = cpu_to_be16(ah_attr->dlid);
av->msg_sr = (3 << 4) | /* 2K message */
- ah_attr->static_rate;
+ mthca_get_rate(dev, ah_attr, ah_attr->port_num);
av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
if (ah_attr->ah_flags & IB_AH_GRH) {
av->g_slid |= 0x80;
Index: src/drivers/infiniband/hw/mthca/mthca_cmd.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_cmd.c 2006-04-03
11:16:20.935996000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_cmd.c 2006-04-03 11:23:28.618616000 +0300
@@ -995,6 +995,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev
#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37
#define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b
+#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_LIM_FLAGS_OFFSET 0x44
#define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48
@@ -1086,6 +1087,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev
dev_lim->num_ports = field & 0xf;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
dev_lim->max_gids = 1 << (field & 0xf);
+ MTHCA_GET(size, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
+ dev_lim->stat_rate_support = size;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
dev_lim->max_pkeys = 1 << (field & 0xf);
MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
Index: src/drivers/infiniband/hw/mthca/mthca_cmd.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_cmd.h 2006-04-03
11:16:21.025998000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_cmd.h 2006-04-03 11:23:28.627616000 +0300
@@ -146,6 +146,7 @@ struct mthca_dev_lim {
int max_vl;
int num_ports;
int max_gids;
+ u16 stat_rate_support;
int max_pkeys;
u32 flags;
int reserved_uars;
Index: src/drivers/infiniband/hw/mthca/mthca_dev.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_dev.h 2006-04-03
11:16:21.116996000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_dev.h 2006-04-03 11:23:43.852153000
+0300
@@ -120,6 +120,24 @@ enum {
MTHCA_CMD_NUM_DBELL_DWORDS = 8
};
+enum {
+ MTHCA_RATE_SUPP_TAVOR_ALL = 0xF
+};
+
+enum {
+ MTHCA_RATE_TAVOR_FULL = 0, /* 4X SDR / DDR depending on HCA and link */
+ MTHCA_RATE_TAVOR_1X = 1,
+ MTHCA_RATE_TAVOR_4X = 2,
+ MTHCA_RATE_TAVOR_1X_DDR = 3
+};
+
+enum {
+ MTHCA_RATE_MEMFREE_FULL = 0, /* 4X SDR / DDR depending on HCA and link */
+ MTHCA_RATE_MEMFREE_QUARTER = 1,
+ MTHCA_RATE_MEMFREE_EIGHTH = 2,
+ MTHCA_RATE_MEMFREE_HALF = 3
+};
+
struct mthca_cmd {
struct pci_pool *pool;
struct mutex hcr_mutex;
@@ -172,6 +190,7 @@ struct mthca_limits {
int reserved_pds;
u32 page_size_cap;
u32 flags;
+ u16 stat_rate_support;
u8 port_width_cap;
};
@@ -353,6 +372,7 @@ struct mthca_dev {
struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2];
struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
spinlock_t sm_lock;
+ u8 rate[MTHCA_MAX_PORTS];
};
#define mthca_dbg(mdev, format, arg...) \
@@ -553,6 +573,9 @@ int mthca_process_mad(struct ib_device *
struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
+int mthca_update_rate(struct mthca_dev *dev, u8 port_num);
+u8 mthca_get_rate(struct mthca_dev *dev, struct ib_ah_attr *attr, u8 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
Index: src/drivers/infiniband/hw/mthca/mthca_mad.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_mad.c 2006-04-03
11:16:21.297997000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_mad.c 2006-04-03 11:23:28.886586000 +0300
@@ -46,6 +46,26 @@ enum {
MTHCA_VENDOR_CLASS2 = 0xa
};
+int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
+{
+ struct ib_port_attr *tprops = NULL;
+ int ret;
+
+ tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+ if (!tprops)
+ return -ENOMEM;
+
+ ret = ib_query_port(&dev->ib_dev, port_num, tprops);
+ if (ret) {
+ printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
+ ret, dev->ib_dev.name, port_num);
+ return ret;
+ }
+ dev->rate[port_num - 1] = tprops->active_speed *
+ ib_width_enum_to_int(tprops->active_width);
+ return 0;
+}
+
static void update_sm_ah(struct mthca_dev *dev,
u8 port_num, u16 lid, u8 sl)
{
@@ -87,6 +107,7 @@ static void smp_snoop(struct ib_device *
mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+ mthca_update_rate(to_mdev(ibdev), port_num);
update_sm_ah(to_mdev(ibdev), port_num,
be16_to_cpup((__be16 *) (mad->data + 58)),
(*(u8 *) (mad->data + 76)) & 0xf);
Index: src/drivers/infiniband/hw/mthca/mthca_main.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_main.c 2006-04-03
11:16:21.207998000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_main.c 2006-04-03 11:23:28.877589000 +0300
@@ -191,6 +191,7 @@ static int __devinit mthca_dev_lim(struc
mdev->limits.port_width_cap = dev_lim->max_port_width;
mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1);
mdev->limits.flags = dev_lim->flags;
+ mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
May be doable since hardware supports it for SRQ.
@@ -957,6 +958,7 @@ static int __devinit mthca_init_one(stru
int ddr_hidden = 0;
int err;
struct mthca_dev *mdev;
+ int i;
if (!mthca_version_printed) {
printk(KERN_INFO "%s", mthca_version);
@@ -1095,8 +1097,18 @@ static int __devinit mthca_init_one(stru
pci_set_drvdata(pdev, mdev);
+ for (i = 1; i <= mdev->limits.num_ports; ++i)
+ if (mthca_update_rate(mdev, i)) {
+ mthca_err(mdev, "Failed to obtain port %d rate."
+ " aborting.\n", i);
+ goto err_free_agents;
+ }
+
return 0;
+err_free_agents:
+ mthca_free_agents(mdev);
+
err_unregister:
mthca_unregister_device(mdev);
Index: src/drivers/infiniband/hw/mthca/mthca_provider.h
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_provider.h 2006-04-03
11:16:21.568967000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_provider.h 2006-04-03
11:23:29.034584000 +0300
@@ -257,6 +257,8 @@ struct mthca_qp {
atomic_t refcount;
u32 qpn;
int is_direct;
+ u16 port;
+ u16 alt_port;
u8 transport;
u8 state;
u8 atomic_rd_en;
@@ -278,7 +280,6 @@ struct mthca_qp {
struct mthca_sqp {
struct mthca_qp qp;
- int port;
int pkey_index;
u32 qkey;
u32 send_psn;
Index: src/drivers/infiniband/hw/mthca/mthca_qp.c
===================================================================
--- src.orig/drivers/infiniband/hw/mthca/mthca_qp.c 2006-04-03
11:16:21.478968000 +0300
+++ src/drivers/infiniband/hw/mthca/mthca_qp.c 2006-04-03 11:28:23.361059000
+0300
@@ -246,6 +246,9 @@ void mthca_qp_event(struct mthca_dev *de
return;
}
+ if (event_type == IB_EVENT_PATH_MIG)
+ qp->port = qp->alt_port;
+
event.device = &dev->ib_dev;
event.event = event_type;
event.element.qp = &qp->ibqp;
@@ -390,10 +393,17 @@ static void to_ib_ah_attr(struct mthca_d
{
memset(ib_ah_attr, 0, sizeof *path);
ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+
+ if (ib_ah_attr->port_num == 0 ||
+ ib_ah_attr->port_num > dev->limits.num_ports )
+ return;
+
ib_ah_attr->dlid = be16_to_cpu(path->rlid);
ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
- ib_ah_attr->static_rate = path->static_rate & 0x7;
+ ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
+ path->static_rate & 0x7,
+ ib_ah_attr->port_num);
ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
if (ib_ah_attr->ah_flags) {
ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
@@ -436,6 +446,7 @@ int mthca_query_qp(struct ib_qp *ibqp, s
context = &qp_param->context;
mthca_state = be32_to_cpu(context->flags) >> 28;
+ memset(qp_attr, 0, sizeof *qp_attr);
qp_attr->qp_state = to_ib_qp_state(mthca_state);
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->path_mtu = context->mtu_msgmax >> 5;
@@ -453,8 +464,10 @@ int mthca_query_qp(struct ib_qp *ibqp, s
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
qp_attr->cap.max_inline_data = qp->max_inline_data;
- to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+ if (qp->transport == RC || qp->transport == UC){
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+ }
qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
@@ -482,11 +495,11 @@ out:
}
static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
- struct mthca_qp_path *path)
+ struct mthca_qp_path *path, u8 port)
{
path->g_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
- path->static_rate = !!ah->static_rate;
+ path->static_rate = mthca_get_rate(dev, ah, port);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
@@ -632,7 +645,7 @@ int mthca_modify_qp(struct ib_qp *ibqp,
if (qp->transport == MLX)
qp_context->pri_path.port_pkey |=
- cpu_to_be32(to_msqp(qp)->port << 24);
+ cpu_to_be32(qp->port << 24);
else {
if (attr_mask & IB_QP_PORT) {
qp_context->pri_path.port_pkey |=
@@ -655,7 +668,8 @@ int mthca_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+ if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
return -EINVAL;
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
@@ -679,7 +693,8 @@ int mthca_modify_qp(struct ib_qp *ibqp,
return -EINVAL;
}
- if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+ if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+ attr->alt_ah_attr.port_num))
return -EINVAL;
qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
@@ -789,6 +804,10 @@ int mthca_modify_qp(struct ib_qp *ibqp,
qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
store_attrs(to_msqp(qp), attr, attr_mask);
@@ -800,13 +819,13 @@ int mthca_modify_qp(struct ib_qp *ibqp,
if (is_qp0(dev, qp)) {
if (cur_state != IB_QPS_RTR &&
new_state == IB_QPS_RTR)
- init_port(dev, to_msqp(qp)->port);
+ init_port(dev, qp->port);
if (cur_state != IB_QPS_RESET &&
cur_state != IB_QPS_ERR &&
(new_state == IB_QPS_RESET ||
new_state == IB_QPS_ERR))
- mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+ mthca_CLOSE_IB(dev, qp->port, &status);
}
/*
@@ -1210,6 +1229,9 @@ int mthca_alloc_qp(struct mthca_dev *dev
if (qp->qpn == -1)
return -ENOMEM;
+ /* initialize port to zero for error-catching. */
+ qp->port = 0;
+
err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
send_policy, qp);
if (err) {
@@ -1259,7 +1281,7 @@ int mthca_alloc_sqp(struct mthca_dev *de
if (err)
goto err_out;
- sqp->port = port;
+ sqp->qp.port = port;
sqp->qp.qpn = mqpn;
sqp->qp.transport = MLX;
@@ -1402,10 +1424,10 @@ static int build_mlx_header(struct mthca
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+ ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
sqp->pkey_index, &pkey);
else
- ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+ ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
wr->wr.ud.pkey_index, &pkey);
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);