[ofa-general] [PATCH 2/12 V2] libibverbs: implement XRC RCV qps
Jack Morgenstein
jackm at dev.mellanox.co.il
Thu Jul 10 08:51:22 PDT 2008
From c33e87deccc1c59278e259c59ac82e3e548b0981 Mon Sep 17 00:00:00 2001
From: Jack Morgenstein <jackm at dev.mellanox.co.il>
Date: Wed, 9 Jul 2008 13:01:31 +0300
Subject: [PATCH] Added support for XRC receive-only QPs.
(OFED 1.3 libibverbs commit 6e99cddf835d4715ea7ca3641944e6285f27f2df)
V2:
1. checkpatch.pl cleanups
2. Fixed u64 alignment problems in kern-abi.h
3. Eliminated unneeded default_symvers
4. Added ibv_xrc_rcv_xxx lines to libibverbs.map IBVERBS_1.1
Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
---
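For reviewers, here is a minimal receive-side usage sketch of the new verbs
(illustrative only, not part of the patch). It assumes an XRC domain already
opened with ibv_open_xrc_domain() and a remote LID/QPN/PSN exchanged out of
band; the port number, MTU and RNR timer values are arbitrary examples, and
the helper name setup_xrc_rcv_qp is made up for this sketch.

#include <string.h>
#include <infiniband/verbs.h>

/*
 * Bring up a kernel-resident XRC receive-only QP and move it to RTR
 * (RTS is not required for a receive-only QP).  remote_lid/qpn/psn
 * come from an out-of-band exchange with the sending node.
 */
static int setup_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
                            uint16_t remote_lid, uint32_t remote_qpn,
                            uint32_t remote_psn, uint32_t *xrc_rcv_qpn)
{
        struct ibv_qp_init_attr init_attr;
        struct ibv_qp_attr attr;
        int ret;

        /* Only the xrc_domain field is used; the QP is created in the
         * kernel and the calling process is implicitly registered. */
        memset(&init_attr, 0, sizeof init_attr);
        init_attr.xrc_domain = xrc_domain;
        ret = ibv_create_xrc_rcv_qp(&init_attr, xrc_rcv_qpn);
        if (ret)
                return ret;

        /* RESET -> INIT */
        memset(&attr, 0, sizeof attr);
        attr.qp_state        = IBV_QPS_INIT;
        attr.pkey_index      = 0;
        attr.port_num        = 1;               /* example port */
        attr.qp_access_flags = 0;
        ret = ibv_modify_xrc_rcv_qp(xrc_domain, *xrc_rcv_qpn, &attr,
                                    IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                                    IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
        if (ret)
                goto err;

        /* INIT -> RTR */
        memset(&attr, 0, sizeof attr);
        attr.qp_state           = IBV_QPS_RTR;
        attr.path_mtu           = IBV_MTU_1024; /* example MTU */
        attr.dest_qp_num        = remote_qpn;
        attr.rq_psn             = remote_psn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer      = 12;           /* example RNR NAK timer */
        attr.ah_attr.dlid       = remote_lid;
        attr.ah_attr.port_num   = 1;
        ret = ibv_modify_xrc_rcv_qp(xrc_domain, *xrc_rcv_qpn, &attr,
                                    IBV_QP_STATE | IBV_QP_AV |
                                    IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                                    IBV_QP_RQ_PSN |
                                    IBV_QP_MAX_DEST_RD_ATOMIC |
                                    IBV_QP_MIN_RNR_TIMER);
        if (ret)
                goto err;

        return 0;

err:
        ibv_unreg_xrc_rcv_qp(xrc_domain, *xrc_rcv_qpn);
        return ret;
}

The returned xrc_rcv_qpn is handed to the sending side; other local processes
that pull from XRC SRQs in the same domain register with ibv_reg_xrc_rcv_qp(),
and each registered process calls ibv_unreg_xrc_rcv_qp() when done (the kernel
destroys the QP once the last registration is dropped).
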
include/infiniband/driver.h | 12 ++-
include/infiniband/kern-abi.h | 99 +++++++++++++++++++-
include/infiniband/verbs.h | 123 +++++++++++++++++++++++
src/cmd.c | 215 +++++++++++++++++++++++++++++++++++++++++
src/device.c | 52 +++++-----
src/libibverbs.map | 10 ++
src/verbs.c | 59 +++++++++++
7 files changed, 543 insertions(+), 27 deletions(-)
diff --git a/include/infiniband/driver.h b/include/infiniband/driver.h
index 30ba79f..f8138ef 100644
--- a/include/infiniband/driver.h
+++ b/include/infiniband/driver.h
@@ -144,7 +144,17 @@ int ibv_cmd_open_xrc_domain(struct ibv_context *context, int fd, int oflag,
struct ibv_open_xrc_domain_resp *resp,
size_t resp_size);
int ibv_cmd_close_xrc_domain(struct ibv_xrc_domain *d);
-
+int ibv_cmd_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
+ uint32_t *xrc_rcv_qpn);
+int ibv_cmd_modify_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_rcv_qpn,
+ struct ibv_qp_attr *attr, int attr_mask);
+int ibv_cmd_query_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_rcv_qpn,
+ struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr);
+int ibv_cmd_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num);
+int ibv_cmd_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num);
/*
* sysfs helper functions
diff --git a/include/infiniband/kern-abi.h b/include/infiniband/kern-abi.h
index 94cf3f2..8b5cd9a 100644
--- a/include/infiniband/kern-abi.h
+++ b/include/infiniband/kern-abi.h
@@ -88,7 +88,12 @@ enum {
IB_USER_VERBS_CMD_POST_SRQ_RECV,
IB_USER_VERBS_CMD_CREATE_XRC_SRQ,
IB_USER_VERBS_CMD_OPEN_XRC_DOMAIN,
- IB_USER_VERBS_CMD_CLOSE_XRC_DOMAIN
+ IB_USER_VERBS_CMD_CLOSE_XRC_DOMAIN,
+ IB_USER_VERBS_CMD_CREATE_XRC_RCV_QP,
+ IB_USER_VERBS_CMD_MODIFY_XRC_RCV_QP,
+ IB_USER_VERBS_CMD_QUERY_XRC_RCV_QP,
+ IB_USER_VERBS_CMD_REG_XRC_RCV_QP,
+ IB_USER_VERBS_CMD_UNREG_XRC_RCV_QP,
};
/*
@@ -570,6 +575,93 @@ struct ibv_destroy_qp_resp {
__u32 events_reported;
};
+struct ibv_create_xrc_rcv_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 xrc_domain_handle;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u8 sq_sig_all;
+ __u8 qp_type;
+ __u8 reserved[6];
+ __u64 driver_data[0];
+};
+
+struct ibv_create_xrc_rcv_qp_resp {
+ __u32 qpn;
+ __u32 reserved;
+};
+
+struct ibv_modify_xrc_rcv_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 xrc_domain_handle;
+ __u32 qp_num;
+ struct ibv_qp_dest dest;
+ struct ibv_qp_dest alt_dest;
+ __u32 attr_mask;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 en_sqd_async_notify;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 reserved[6];
+ __u64 driver_data[0];
+};
+
+struct ibv_query_xrc_rcv_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 xrc_domain_handle;
+ __u32 qp_num;
+ __u32 attr_mask;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct ibv_reg_xrc_rcv_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 xrc_domain_handle;
+ __u32 qp_num;
+ __u64 driver_data[0];
+};
+
+struct ibv_unreg_xrc_rcv_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 xrc_domain_handle;
+ __u32 qp_num;
+ __u64 driver_data[0];
+};
+
struct ibv_kern_send_wr {
__u64 wr_id;
__u32 num_sge;
@@ -848,6 +940,11 @@ enum {
IB_USER_VERBS_CMD_CREATE_XRC_SRQ_V2 = -1,
IB_USER_VERBS_CMD_OPEN_XRC_DOMAIN_V2 = -1,
IB_USER_VERBS_CMD_CLOSE_XRC_DOMAIN_V2 = -1,
+ IB_USER_VERBS_CMD_CREATE_XRC_RCV_QP_V2 = -1,
+ IB_USER_VERBS_CMD_MODIFY_XRC_RCV_QP_V2 = -1,
+ IB_USER_VERBS_CMD_QUERY_XRC_RCV_QP_V2 = -1,
+ IB_USER_VERBS_CMD_REG_XRC_RCV_QP_V2 = -1,
+ IB_USER_VERBS_CMD_UNREG_XRC_RCV_QP_V2 = -1,
};
struct ibv_destroy_cq_v1 {
diff --git a/include/infiniband/verbs.h b/include/infiniband/verbs.h
index 6c9a3b3..036a0c5 100644
--- a/include/infiniband/verbs.h
+++ b/include/infiniband/verbs.h
@@ -205,12 +205,17 @@ enum ibv_event_type {
IBV_EVENT_CLIENT_REREGISTER
};
+enum ibv_event_flags {
+ IBV_XRC_QP_EVENT_FLAG = 0x80000000,
+};
+
struct ibv_async_event {
union {
struct ibv_cq *cq;
struct ibv_qp *qp;
struct ibv_srq *srq;
int port_num;
+ uint32_t xrc_qp_num;
} element;
enum ibv_event_type event_type;
};
@@ -648,6 +653,22 @@ struct ibv_more_ops {
struct ibv_xrc_domain * (*open_xrc_domain)(struct ibv_context *context,
int fd, int oflag);
int (*close_xrc_domain)(struct ibv_xrc_domain *d);
+ int (*create_xrc_rcv_qp)(struct ibv_qp_init_attr *init_attr,
+ uint32_t *xrc_qp_num);
+ int (*modify_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr,
+ int attr_mask);
+ int (*query_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr,
+ int attr_mask,
+ struct ibv_qp_init_attr *init_attr);
+ int (*reg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num);
+ int (*unreg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num);
+
};
struct ibv_context_ops {
@@ -1174,6 +1195,108 @@ struct ibv_xrc_domain *ibv_open_xrc_domain(struct ibv_context *context,
*/
int ibv_close_xrc_domain(struct ibv_xrc_domain *d);
+/**
+ * ibv_create_xrc_rcv_qp - creates an XRC QP to serve as a receive-side only QP.
+ *
+ * This QP is created in kernel space, and persists until the last process
+ * registered for the QP calls ibv_unreg_xrc_rcv_qp() (at which time the QP
+ * is destroyed).
+ *
+ * @init_attr: init attributes to use for QP. xrc domain MUST be included here.
+ * All other fields are ignored.
+ *
+ * @xrc_rcv_qpn: qp_num of created QP (on success). To be passed to the
+ * remote node (sender). The remote node will use xrc_rcv_qpn
+ * in ibv_post_send when sending to XRC SRQ's on this host
+ * in the same xrc domain.
+ *
+ * RETURNS: success (0), or a (positive) error value.
+ *
+ * NOTE: this verb also registers the calling user-process with the QP at its
+ * creation time (implicit call to ibv_reg_xrc_rcv_qp), to avoid race
+ * conditions. The creating process will need to call ibv_unreg_xrc_rcv_qp()
+ * for the QP, to release it from this process.
+ */
+int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
+ uint32_t *xrc_rcv_qpn);
+
+/**
+ * ibv_modify_xrc_rcv_qp - modifies an xrc_rcv qp.
+ *
+ * @xrc_domain: xrc domain the QP belongs to (for verification).
+ * @xrc_qp_num: The (24 bit) number of the XRC QP.
+ * @attr: modify-qp attributes. The following fields must be specified:
+ * for RESET_2_INIT: qp_state, pkey_index, port, qp_access_flags
+ * for INIT_2_RTR: qp_state, path_mtu, dest_qp_num, rq_psn,
+ * max_dest_rd_atomic, min_rnr_timer, ah_attr
+ * The QP need not be brought to RTS for the QP to operate as a
+ * receive-only QP.
+ * @attr_mask: bitmap indicating which attributes are provided in the attr
+ * struct. Used for validity checking.
+ * The following bits must be set:
+ * for RESET_2_INIT: IBV_QP_PKEY_INDEX, IBV_QP_PORT,
+ * IBV_QP_ACCESS_FLAGS, IBV_QP_STATE
+ * for INIT_2_RTR: IBV_QP_AV, IBV_QP_PATH_MTU, IBV_QP_DEST_QPN,
+ * IBV_QP_RQ_PSN, IBV_QP_MAX_DEST_RD_ATOMIC,
+ * IBV_QP_MIN_RNR_TIMER, IBV_QP_STATE
+ *
+ * RETURNS: success (0), or a (positive) error value.
+ *
+ */
+int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr, int attr_mask);
+
+/**
+ * ibv_query_xrc_rcv_qp - queries an xrc_rcv qp.
+ *
+ * @xrc_domain: xrc domain the QP belongs to (for verification).
+ * @xrc_qp_num: The (24 bit) number of the XRC QP.
+ * @attr: for returning qp attributes.
+ * @attr_mask: bitmap indicating which attributes to return.
+ * @init_attr: for returning the init attributes
+ *
+ * RETURNS: success (0), or a (positive) error value.
+ *
+ */
+int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr);
+
+/**
+ * ibv_reg_xrc_rcv_qp: registers a user process with an XRC QP which serves as
+ * a receive-side only QP.
+ *
+ * @xrc_domain: xrc domain the QP belongs to (for verification).
+ * @xrc_qp_num: The (24 bit) number of the XRC QP.
+ *
+ * RETURNS: success (0),
+ * or error (EINVAL), if:
+ * 1. There is no such QP_num allocated.
+ * 2. The QP is allocated, but is not a receive XRC QP.
+ * 3. The XRC QP does not belong to the given domain.
+ */
+int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num);
+
+/**
+ * ibv_unreg_xrc_rcv_qp: detaches a user process from an XRC QP serving as
+ * a receive-side only QP. If, as a result, there are no remaining
+ * userspace processes registered for this XRC QP, it is destroyed.
+ *
+ * @xrc_domain: xrc domain the QP belongs to (for verification).
+ * @xrc_qp_num: The (24 bit) number of the XRC QP.
+ *
+ * RETURNS: success (0),
+ * or error (EINVAL), if:
+ * 1. There is no such QP_num allocated.
+ * 2. The QP is allocated, but is not an XRC QP.
+ * 3. The XRC QP does not belong to the given domain.
+ * NOTE: There is no reason to return a special code if the QP is destroyed.
+ * The unregister simply succeeds.
+ */
+int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
+ uint32_t xrc_qp_num);
+
END_C_DECLS
# undef __attribute_const
diff --git a/src/cmd.c b/src/cmd.c
index 66e3f2d..cedf55e 100644
--- a/src/cmd.c
+++ b/src/cmd.c
@@ -828,6 +828,188 @@ int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
return 0;
}
+int ibv_cmd_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
+ uint32_t *xrc_rcv_qpn)
+{
+ struct ibv_create_xrc_rcv_qp cmd;
+ struct ibv_create_xrc_rcv_qp_resp resp;
+
+ if (abi_ver < 6)
+ return ENOSYS;
+
+ IBV_INIT_CMD_RESP(&cmd, sizeof cmd, CREATE_XRC_RCV_QP, &resp,
+ sizeof resp);
+
+ cmd.xrc_domain_handle = init_attr->xrc_domain->handle;
+ cmd.max_send_wr = init_attr->cap.max_send_wr;
+ cmd.max_recv_wr = init_attr->cap.max_recv_wr;
+ cmd.max_send_sge = init_attr->cap.max_send_sge;
+ cmd.max_recv_sge = init_attr->cap.max_recv_sge;
+ cmd.max_inline_data = init_attr->cap.max_inline_data;
+ cmd.sq_sig_all = init_attr->sq_sig_all;
+ cmd.qp_type = init_attr->qp_type;
+ cmd.reserved[0] = cmd.reserved[1] = 0;
+
+ if (write(init_attr->xrc_domain->context->cmd_fd, &cmd, sizeof cmd) !=
+ sizeof cmd)
+ return errno;
+
+ *xrc_rcv_qpn = resp.qpn;
+
+ return 0;
+}
+
+int ibv_cmd_modify_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr, int attr_mask)
+{
+ struct ibv_modify_xrc_rcv_qp cmd;
+
+ if (abi_ver < 6)
+ return ENOSYS;
+
+ IBV_INIT_CMD(&cmd, sizeof cmd, MODIFY_XRC_RCV_QP);
+
+ cmd.xrc_domain_handle = d->handle;
+ cmd.qp_num = xrc_qp_num;
+ cmd.attr_mask = attr_mask;
+ cmd.qkey = attr->qkey;
+ cmd.rq_psn = attr->rq_psn;
+ cmd.sq_psn = attr->sq_psn;
+ cmd.dest_qp_num = attr->dest_qp_num;
+ cmd.qp_access_flags = attr->qp_access_flags;
+ cmd.pkey_index = attr->pkey_index;
+ cmd.alt_pkey_index = attr->alt_pkey_index;
+ cmd.qp_state = attr->qp_state;
+ cmd.cur_qp_state = attr->cur_qp_state;
+ cmd.path_mtu = attr->path_mtu;
+ cmd.path_mig_state = attr->path_mig_state;
+ cmd.en_sqd_async_notify = attr->en_sqd_async_notify;
+ cmd.max_rd_atomic = attr->max_rd_atomic;
+ cmd.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+ cmd.min_rnr_timer = attr->min_rnr_timer;
+ cmd.port_num = attr->port_num;
+ cmd.timeout = attr->timeout;
+ cmd.retry_cnt = attr->retry_cnt;
+ cmd.rnr_retry = attr->rnr_retry;
+ cmd.alt_port_num = attr->alt_port_num;
+ cmd.alt_timeout = attr->alt_timeout;
+
+ memcpy(cmd.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
+ cmd.dest.flow_label = attr->ah_attr.grh.flow_label;
+ cmd.dest.dlid = attr->ah_attr.dlid;
+ cmd.dest.reserved = 0;
+ cmd.dest.sgid_index = attr->ah_attr.grh.sgid_index;
+ cmd.dest.hop_limit = attr->ah_attr.grh.hop_limit;
+ cmd.dest.traffic_class = attr->ah_attr.grh.traffic_class;
+ cmd.dest.sl = attr->ah_attr.sl;
+ cmd.dest.src_path_bits = attr->ah_attr.src_path_bits;
+ cmd.dest.static_rate = attr->ah_attr.static_rate;
+ cmd.dest.is_global = attr->ah_attr.is_global;
+ cmd.dest.port_num = attr->ah_attr.port_num;
+
+ memcpy(cmd.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
+ cmd.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
+ cmd.alt_dest.dlid = attr->alt_ah_attr.dlid;
+ cmd.alt_dest.reserved = 0;
+ cmd.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
+ cmd.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
+ cmd.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
+ cmd.alt_dest.sl = attr->alt_ah_attr.sl;
+ cmd.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
+ cmd.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
+ cmd.alt_dest.is_global = attr->alt_ah_attr.is_global;
+ cmd.alt_dest.port_num = attr->alt_ah_attr.port_num;
+
+ cmd.reserved[0] = cmd.reserved[1] = 0;
+
+ if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
+ return errno;
+
+ return 0;
+}
+
+int ibv_cmd_query_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num,
+ struct ibv_qp_attr *attr, int attr_mask,
+ struct ibv_qp_init_attr *init_attr)
+{
+ struct ibv_query_xrc_rcv_qp cmd;
+ struct ibv_query_qp_resp resp;
+
+ if (abi_ver < 6)
+ return ENOSYS;
+
+ IBV_INIT_CMD_RESP(&cmd, sizeof cmd, QUERY_XRC_RCV_QP, &resp,
+ sizeof resp);
+ cmd.xrc_domain_handle = d->handle;
+ cmd.qp_num = xrc_qp_num;
+ cmd.attr_mask = attr_mask;
+
+ if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
+ return errno;
+
+ VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
+
+ attr->qkey = resp.qkey;
+ attr->rq_psn = resp.rq_psn;
+ attr->sq_psn = resp.sq_psn;
+ attr->dest_qp_num = resp.dest_qp_num;
+ attr->qp_access_flags = resp.qp_access_flags;
+ attr->pkey_index = resp.pkey_index;
+ attr->alt_pkey_index = resp.alt_pkey_index;
+ attr->qp_state = resp.qp_state;
+ attr->cur_qp_state = resp.cur_qp_state;
+ attr->path_mtu = resp.path_mtu;
+ attr->path_mig_state = resp.path_mig_state;
+ attr->sq_draining = resp.sq_draining;
+ attr->max_rd_atomic = resp.max_rd_atomic;
+ attr->max_dest_rd_atomic = resp.max_dest_rd_atomic;
+ attr->min_rnr_timer = resp.min_rnr_timer;
+ attr->port_num = resp.port_num;
+ attr->timeout = resp.timeout;
+ attr->retry_cnt = resp.retry_cnt;
+ attr->rnr_retry = resp.rnr_retry;
+ attr->alt_port_num = resp.alt_port_num;
+ attr->alt_timeout = resp.alt_timeout;
+ attr->cap.max_send_wr = resp.max_send_wr;
+ attr->cap.max_recv_wr = resp.max_recv_wr;
+ attr->cap.max_send_sge = resp.max_send_sge;
+ attr->cap.max_recv_sge = resp.max_recv_sge;
+ attr->cap.max_inline_data = resp.max_inline_data;
+
+ memcpy(attr->ah_attr.grh.dgid.raw, resp.dest.dgid, 16);
+ attr->ah_attr.grh.flow_label = resp.dest.flow_label;
+ attr->ah_attr.dlid = resp.dest.dlid;
+ attr->ah_attr.grh.sgid_index = resp.dest.sgid_index;
+ attr->ah_attr.grh.hop_limit = resp.dest.hop_limit;
+ attr->ah_attr.grh.traffic_class = resp.dest.traffic_class;
+ attr->ah_attr.sl = resp.dest.sl;
+ attr->ah_attr.src_path_bits = resp.dest.src_path_bits;
+ attr->ah_attr.static_rate = resp.dest.static_rate;
+ attr->ah_attr.is_global = resp.dest.is_global;
+ attr->ah_attr.port_num = resp.dest.port_num;
+
+ memcpy(attr->alt_ah_attr.grh.dgid.raw, resp.alt_dest.dgid, 16);
+ attr->alt_ah_attr.grh.flow_label = resp.alt_dest.flow_label;
+ attr->alt_ah_attr.dlid = resp.alt_dest.dlid;
+ attr->alt_ah_attr.grh.sgid_index = resp.alt_dest.sgid_index;
+ attr->alt_ah_attr.grh.hop_limit = resp.alt_dest.hop_limit;
+ attr->alt_ah_attr.grh.traffic_class = resp.alt_dest.traffic_class;
+ attr->alt_ah_attr.sl = resp.alt_dest.sl;
+ attr->alt_ah_attr.src_path_bits = resp.alt_dest.src_path_bits;
+ attr->alt_ah_attr.static_rate = resp.alt_dest.static_rate;
+ attr->alt_ah_attr.is_global = resp.alt_dest.is_global;
+ attr->alt_ah_attr.port_num = resp.alt_dest.port_num;
+
+ init_attr->cap.max_send_wr = resp.max_send_wr;
+ init_attr->cap.max_recv_wr = resp.max_recv_wr;
+ init_attr->cap.max_send_sge = resp.max_send_sge;
+ init_attr->cap.max_recv_sge = resp.max_recv_sge;
+ init_attr->cap.max_inline_data = resp.max_inline_data;
+ init_attr->sq_sig_all = resp.sq_sig_all;
+
+ return 0;
+}
+
static int ibv_cmd_destroy_qp_v1(struct ibv_qp *qp)
{
struct ibv_destroy_qp_v1 cmd;
@@ -1192,3 +1374,36 @@ int ibv_cmd_close_xrc_domain(struct ibv_xrc_domain *d)
return 0;
}
+int ibv_cmd_reg_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num)
+{
+ struct ibv_reg_xrc_rcv_qp cmd;
+
+ if (abi_ver < 6)
+ return ENOSYS;
+
+ IBV_INIT_CMD(&cmd, sizeof cmd, REG_XRC_RCV_QP);
+ cmd.xrc_domain_handle = d->handle;
+ cmd.qp_num = xrc_qp_num;
+
+ if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
+ return errno;
+ return 0;
+}
+
+int ibv_cmd_unreg_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num)
+{
+ struct ibv_unreg_xrc_rcv_qp cmd;
+
+ if (abi_ver < 6)
+ return ENOSYS;
+
+ IBV_INIT_CMD(&cmd, sizeof cmd, UNREG_XRC_RCV_QP);
+ cmd.xrc_domain_handle = d->handle;
+ cmd.qp_num = xrc_qp_num;
+
+ if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
+ return errno;
+ return 0;
+}
+
+
diff --git a/src/device.c b/src/device.c
index 3abc1eb..55ed3d7 100644
--- a/src/device.c
+++ b/src/device.c
@@ -182,31 +182,33 @@ int __ibv_get_async_event(struct ibv_context *context,
event->event_type = ev.event_type;
- switch (event->event_type) {
- case IBV_EVENT_CQ_ERR:
- event->element.cq = (void *) (uintptr_t) ev.element;
- break;
-
- case IBV_EVENT_QP_FATAL:
- case IBV_EVENT_QP_REQ_ERR:
- case IBV_EVENT_QP_ACCESS_ERR:
- case IBV_EVENT_COMM_EST:
- case IBV_EVENT_SQ_DRAINED:
- case IBV_EVENT_PATH_MIG:
- case IBV_EVENT_PATH_MIG_ERR:
- case IBV_EVENT_QP_LAST_WQE_REACHED:
- event->element.qp = (void *) (uintptr_t) ev.element;
- break;
-
- case IBV_EVENT_SRQ_ERR:
- case IBV_EVENT_SRQ_LIMIT_REACHED:
- event->element.srq = (void *) (uintptr_t) ev.element;
- break;
-
- default:
- event->element.port_num = ev.element;
- break;
- }
+ if (event->event_type & IBV_XRC_QP_EVENT_FLAG) {
+ event->element.xrc_qp_num = ev.element;
+ } else
+ switch (event->event_type) {
+ case IBV_EVENT_CQ_ERR:
+ event->element.cq = (void *) (uintptr_t) ev.element;
+ break;
+
+ case IBV_EVENT_QP_FATAL:
+ case IBV_EVENT_QP_REQ_ERR:
+ case IBV_EVENT_QP_ACCESS_ERR:
+ case IBV_EVENT_COMM_EST:
+ case IBV_EVENT_SQ_DRAINED:
+ case IBV_EVENT_PATH_MIG:
+ case IBV_EVENT_PATH_MIG_ERR:
+ case IBV_EVENT_QP_LAST_WQE_REACHED:
+ event->element.qp = (void *) (uintptr_t) ev.element;
+ break;
+
+ case IBV_EVENT_SRQ_ERR:
+ case IBV_EVENT_SRQ_LIMIT_REACHED:
+ event->element.srq = (void *) (uintptr_t) ev.element;
+ break;
+ default:
+ event->element.port_num = ev.element;
+ break;
+ }
if (context->ops.async_event)
context->ops.async_event(event);
diff --git a/src/libibverbs.map b/src/libibverbs.map
index d1a6a47..fce6965 100644
--- a/src/libibverbs.map
+++ b/src/libibverbs.map
@@ -97,6 +97,16 @@ IBVERBS_1.1 {
ibv_cmd_open_xrc_domain;
ibv_close_xrc_domain;
ibv_cmd_close_xrc_domain;
+ ibv_create_xrc_rcv_qp;
+ ibv_cmd_create_xrc_rcv_qp;
+ ibv_modify_xrc_rcv_qp;
+ ibv_cmd_modify_xrc_rcv_qp;
+ ibv_query_xrc_rcv_qp;
+ ibv_cmd_query_xrc_rcv_qp;
+ ibv_reg_xrc_rcv_qp;
+ ibv_cmd_reg_xrc_rcv_qp;
+ ibv_unreg_xrc_rcv_qp;
+ ibv_cmd_unreg_xrc_rcv_qp;
ibv_node_type_str;
ibv_port_state_str;
diff --git a/src/verbs.c b/src/verbs.c
index f09542f..bfc6805 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -597,3 +597,62 @@ int ibv_close_xrc_domain(struct ibv_xrc_domain *d)
return d->context->more_ops->close_xrc_domain(d);
}
+
+int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
+ uint32_t *xrc_rcv_qpn)
+{
+ struct ibv_context *c;
+ if (!init_attr || !(init_attr->xrc_domain))
+ return EINVAL;
+
+ c = init_attr->xrc_domain->context;
+ if (!c->more_ops)
+ return ENOSYS;
+
+ return c->more_ops->create_xrc_rcv_qp(init_attr,
+ xrc_rcv_qpn);
+}
+
+int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *d,
+ uint32_t xrc_rcv_qpn,
+ struct ibv_qp_attr *attr,
+ int attr_mask)
+{
+ if (!d || !attr)
+ return EINVAL;
+
+ if (!d->context->more_ops)
+ return ENOSYS;
+
+ return d->context->more_ops->modify_xrc_rcv_qp(d, xrc_rcv_qpn, attr,
+ attr_mask);
+}
+
+int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *d,
+ uint32_t xrc_rcv_qpn,
+ struct ibv_qp_attr *attr,
+ int attr_mask,
+ struct ibv_qp_init_attr *init_attr)
+{
+ if (!d)
+ return EINVAL;
+
+ if (!d->context->more_ops)
+ return ENOSYS;
+
+ return d->context->more_ops->query_xrc_rcv_qp(d, xrc_rcv_qpn, attr,
+ attr_mask, init_attr);
+}
+
+int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *d,
+ uint32_t xrc_rcv_qpn)
+{
+ return d->context->more_ops->reg_xrc_rcv_qp(d, xrc_rcv_qpn);
+}
+
+int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *d,
+ uint32_t xrc_rcv_qpn)
+{
+ return d->context->more_ops->unreg_xrc_rcv_qp(d, xrc_rcv_qpn);
+}
+
--
1.5.1.6