[ofw] patch: Allow send_bw to work with grh.
Tzachi Dar
tzachid at mellanox.co.il
Wed Nov 24 00:40:10 PST 2010
Applied on 3002.
Thanks
Tzachi
From: ofw-bounces at lists.openfabrics.org [mailto:ofw-bounces at lists.openfabrics.org] On Behalf Of Tzachi Dar
Sent: Monday, November 22, 2010 7:58 PM
To: ofw at lists.openfabrics.org
Subject: [ofw] patch: Allow send_bw to work with grh.
This check-in allows send_bw to work with RoCE, which requires a GRH.
If anyone has an idea of what I should use for ib_grh_set_ver_class_flow(), I'll be happy to know.
Thanks
Tzachi
Index: user/perf_defs.h
===================================================================
--- user/perf_defs.h (revision 6853)
+++ user/perf_defs.h (working copy)
@@ -55,6 +55,13 @@
#define KEY_MSG_SIZE (sizeof "0000:000000:000000:00000000:0000000000000000")
#define KEY_PRINT_FMT "%04x:%06x:%06x:%08x:%016I64x"
+
+// The Format of the message we pass through sockets (With Gid).
+#define KEY_PRINT_FMT_GID "%04x:%06x:%06x:%08x:%016I64x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
+
+#define KEY_MSG_SIZE_GID 98 // Message size with gid (MGID as well).
+
+
#define KEY_SCAN_FMT "%x:%x:%x:%x:%x"
#define VERSION 2.0
@@ -122,8 +129,9 @@
ib_net16_t lid;
ib_net32_t qpn;
ib_net32_t psn;
- uint32_t rkey;
- uint64_t vaddr;
+ uint32_t rkey;
+ uint64_t vaddr;
+ ib_gid_t gid;
};
Index: user/perf_utils.c
===================================================================
--- user/perf_utils.c (revision 6853)
+++ user/perf_utils.c (working copy)
@@ -74,10 +74,18 @@
static int pp_write_keys(SOCKET sockfd, const struct pingpong_dest *my_dest)
{
- char msg[KEY_MSG_SIZE];
+ char msg[KEY_MSG_SIZE_GID];
PERF_ENTER;
- sprintf(msg, KEY_PRINT_FMT,cl_hton16(my_dest->lid), cl_hton32(my_dest->qpn),
- cl_hton32(my_dest->psn), cl_hton32(my_dest->rkey), my_dest->vaddr);
+ sprintf(msg, KEY_PRINT_FMT_GID,cl_hton16(my_dest->lid), cl_hton32(my_dest->qpn),
+ cl_hton32(my_dest->psn), cl_hton32(my_dest->rkey), my_dest->vaddr,
+ my_dest->gid.raw[0],my_dest->gid.raw[1],
+ my_dest->gid.raw[2],my_dest->gid.raw[3],
+ my_dest->gid.raw[4],my_dest->gid.raw[5],
+ my_dest->gid.raw[6],my_dest->gid.raw[7],
+ my_dest->gid.raw[8],my_dest->gid.raw[9],
+ my_dest->gid.raw[10],my_dest->gid.raw[11],
+ my_dest->gid.raw[12],my_dest->gid.raw[13],
+ my_dest->gid.raw[14],my_dest->gid.raw[15]);
if (send(sockfd, msg, sizeof msg,0) != sizeof msg) {
perror("pp_write_keys");
@@ -93,7 +101,7 @@
{
int parsed;
- char msg[KEY_MSG_SIZE];
+ char msg[KEY_MSG_SIZE_GID];
PERF_ENTER;
if (recv(sockfd, msg, sizeof msg, 0) != sizeof msg) {
perror("pp_read_keys");
@@ -101,14 +109,23 @@
return -1;
}
- parsed = sscanf(msg, KEY_PRINT_FMT, &rem_dest->lid, &rem_dest->qpn,
- &rem_dest->psn,&rem_dest->rkey, &rem_dest->vaddr);
+ parsed = sscanf(msg, KEY_PRINT_FMT_GID, &rem_dest->lid, &rem_dest->qpn,
+ &rem_dest->psn,&rem_dest->rkey, &rem_dest->vaddr,
+ &rem_dest->gid.raw[0],&rem_dest->gid.raw[1],
+ &rem_dest->gid.raw[2],&rem_dest->gid.raw[3],
+ &rem_dest->gid.raw[4],&rem_dest->gid.raw[5],
+ &rem_dest->gid.raw[6],&rem_dest->gid.raw[7],
+ &rem_dest->gid.raw[8],&rem_dest->gid.raw[9],
+ &rem_dest->gid.raw[10],&rem_dest->gid.raw[11],
+ &rem_dest->gid.raw[12],&rem_dest->gid.raw[13],
+ &rem_dest->gid.raw[14],&rem_dest->gid.raw[15]);
+
rem_dest->lid = cl_ntoh16(rem_dest->lid);
rem_dest->qpn = cl_ntoh32(rem_dest->qpn);
rem_dest->psn = cl_ntoh32(rem_dest->psn);
rem_dest->rkey = cl_ntoh32(rem_dest->rkey);
- if (parsed != 5) {
+ if (parsed != 21) {
fprintf(stderr, "Couldn't parse line <%.*s > parsed = %d %s (%x)\n",
(int)sizeof msg, msg,parsed,sock_get_error_str(), WSAGetLastError());
return -1;
@@ -242,5 +259,3 @@
-
-
Index: user/send_bw/send_bw.c
===================================================================
--- user/send_bw/send_bw.c (revision 6853)
+++ user/send_bw/send_bw.c (working copy)
@@ -53,6 +53,7 @@
int tx_depth;
int duplex;
int use_event;
+ int use_grh;
};
static int page_size;
@@ -402,6 +403,13 @@
IB_MOD_QP_RESP_RES |
IB_MOD_QP_PRIMARY_AV;
+ if(user_parm->use_grh)
+ {
+ attr.state.rtr.primary_av.grh_valid = 1;
+ attr.state.rtr.primary_av.grh.hop_limit = 1;
+ attr.state.rtr.primary_av.grh.dest_gid =dest->gid;
+ attr.state.rtr.primary_av.grh.src_gid = ctx->ca_attr->p_port_attr->p_gid_table[0];
+ }
ib_status = ib_modify_qp(ctx->qp[0], &attr);
if(ib_status != IB_SUCCESS){
@@ -412,7 +420,17 @@
if (user_parm->connection_type == UD) {
ib_av_attr_t av_attr;
- av_attr.grh_valid = 0;
+ if(user_parm->use_grh){
+ av_attr.grh_valid = 1;
+ av_attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(6, 0 ,0);
+ av_attr.grh.resv1 = 0;
+ av_attr.grh.resv2 = 0;
+ av_attr.grh.hop_limit = 1;
+ av_attr.grh.src_gid = ctx->ca_attr->p_port_attr->p_gid_table[0];
+ av_attr.grh.dest_gid = dest->gid;
+ } else {
+ av_attr.grh_valid = 0;
+ }
av_attr.dlid = dest->lid;
av_attr.sl = 0;
av_attr.path_bits = 0;
@@ -534,6 +552,7 @@
/* TBD this should be changed inot VA and different key to each qp */
my_dest[i].rkey = ctx->rkey;
my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size;
+ my_dest[i].gid = ctx->ca_attr->p_port_attr->p_gid_table[0];
printf(" local address: LID %#04x, QPN %#06x, PSN %#06x, "
"RKey %#08x VAddr %#016Lx\n",
@@ -581,6 +600,7 @@
printf(" -t, --tx-depth=<dep> size of tx queue (default 300)\n");
printf(" -n, --iters=<iters> number of exchanges (at least 2, default 1000)\n");
printf(" -b, --bidirectional measure bidirectional bandwidth (default unidirectional)\n");
+ printf(" -g, --grh Use GRH with packets (mandatory for RoCE)\n");
printf(" -V, --version display version number\n");
}
@@ -1005,10 +1025,11 @@
{ "bidirectional", 0, NULL, 'b' },
{ "version", 0, NULL, 'V' },
{ "events", 0, NULL, 'e' },
+ { "grh", 0, NULL, 'g' },
{ 0 }
};
- c = getopt_long(argc, argv, "p:d:i:m:c:s:n:t:ebaVh", long_options, NULL);
+ c = getopt_long(argc, argv, "p:d:i:m:c:s:n:t:ebaVhg", long_options, NULL);
if (c == -1)
break;
@@ -1077,6 +1098,11 @@
case 'b':
user_param.duplex = 1;
break;
+
+ case 'g':
+ user_param.use_grh = 1;
+ break;
+
case 'h':
default:
usage(argv[0]);
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.openfabrics.org/pipermail/ofw/attachments/20101124/03a65dca/attachment.html>
More information about the ofw
mailing list