[openib-general] [PATCH 3/7] IB/ipath - performance improvements via mmap of queues
Ralph Campbell
ralphc at pathscale.com
Fri Aug 11 14:57:02 PDT 2006
Improve the performance of the userspace verbs ibv_poll_cq(),
ibv_post_recv(), and ibv_post_srq_recv().
Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
Index: src/userspace/libipathverbs/src/verbs.c
===================================================================
--- src/userspace/libipathverbs/src/verbs.c (revision 8843)
+++ src/userspace/libipathverbs/src/verbs.c (working copy)
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005. PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -43,8 +44,11 @@
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>
+#include <sys/mman.h>
+#include <errno.h>
#include "ipathverbs.h"
+#include "ipath-abi.h"
int ipath_query_device(struct ibv_context *context,
struct ibv_device_attr *attr)
@@ -54,7 +58,8 @@
unsigned major, minor, sub_minor;
int ret;
- ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
+ ret = ibv_cmd_query_device(context, attr, &raw_fw_ver,
+ &cmd, sizeof cmd);
if (ret)
return ret;
@@ -142,55 +147,147 @@
struct ibv_comp_channel *channel,
int comp_vector)
{
- struct ibv_cq *cq;
- struct ibv_create_cq cmd;
- struct ibv_create_cq_resp resp;
- int ret;
+ struct ipath_cq *cq;
+ struct ibv_create_cq cmd;
+ struct ipath_create_cq_resp resp;
+ int ret;
+ size_t size;
cq = malloc(sizeof *cq);
if (!cq)
return NULL;
- ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector, cq,
- &cmd, sizeof cmd, &resp, sizeof resp);
+ ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
+ &cq->ibv_cq, &cmd, sizeof cmd,
+ &resp.ibv_resp, sizeof resp);
if (ret) {
free(cq);
return NULL;
}
- return cq;
+ size = sizeof(struct ipath_cq_wc) + sizeof(struct ipath_wc) * cqe;
+ cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ context->cmd_fd, resp.offset);
+ if ((void *) cq->queue == MAP_FAILED) {
+ free(cq);
+ return NULL;
+ }
+
+ pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE);
+ return &cq->ibv_cq;
}
-int ipath_destroy_cq(struct ibv_cq *cq)
+int ipath_resize_cq(struct ibv_cq *ibcq, int cqe)
{
+ struct ipath_cq *cq = to_icq(ibcq);
+ struct ibv_resize_cq cmd;
+ struct ipath_resize_cq_resp resp;
+ size_t size;
+ int ret;
+
+ pthread_spin_lock(&cq->lock);
+ /* Save the old size so we can unmap the queue. */
+ size = sizeof(struct ipath_cq_wc) +
+ (sizeof(struct ipath_wc) * cq->ibv_cq.cqe);
+ ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd,
+ &resp.ibv_resp, sizeof resp);
+ if (ret) {
+ pthread_spin_unlock(&cq->lock);
+ return ret;
+ }
+ (void) munmap(cq->queue, size);
+ size = sizeof(struct ipath_cq_wc) +
+ (sizeof(struct ipath_wc) * cq->ibv_cq.cqe);
+ cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ ibcq->context->cmd_fd, resp.offset);
+ ret = errno;
+ pthread_spin_unlock(&cq->lock);
+ if ((void *) cq->queue == MAP_FAILED)
+ return ret;
+ return 0;
+}
+
+int ipath_destroy_cq(struct ibv_cq *ibcq)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
int ret;
- ret = ibv_cmd_destroy_cq(cq);
+ ret = ibv_cmd_destroy_cq(ibcq);
if (ret)
return ret;
+ (void) munmap(cq->queue, sizeof(struct ipath_cq_wc) +
+ (sizeof(struct ipath_wc) * cq->ibv_cq.cqe));
free(cq);
return 0;
}
+int ipath_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
+{
+ struct ipath_cq *cq = to_icq(ibcq);
+ struct ipath_cq_wc *q;
+ int npolled;
+ uint32_t tail;
+
+ pthread_spin_lock(&cq->lock);
+ q = cq->queue;
+ tail = q->tail;
+ for (npolled = 0; npolled < ne; ++npolled, ++wc) {
+ if (tail == q->head)
+ break;
+ memcpy(wc, &q->queue[tail], sizeof(*wc));
+ if (tail == cq->ibv_cq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ q->tail = tail;
+ pthread_spin_unlock(&cq->lock);
+
+ return npolled;
+}
+
struct ibv_qp *ipath_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
{
- struct ibv_create_qp cmd;
- struct ibv_create_qp_resp resp;
- struct ibv_qp *qp;
- int ret;
+ struct ibv_create_qp cmd;
+ struct ipath_create_qp_resp resp;
+ struct ipath_qp *qp;
+ int ret;
+ size_t size;
qp = malloc(sizeof *qp);
if (!qp)
return NULL;
- ret = ibv_cmd_create_qp(pd, qp, attr, &cmd, sizeof cmd, &resp, sizeof resp);
+ ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd, sizeof cmd,
+ &resp.ibv_resp, sizeof resp);
if (ret) {
free(qp);
return NULL;
}
- return qp;
+ if (attr->srq) {
+ qp->rq.size = 0;
+ qp->rq.max_sge = 0;
+ qp->rq.rwq = NULL;
+ } else {
+ qp->rq.size = attr->cap.max_recv_wr + 1;
+ qp->rq.max_sge = attr->cap.max_recv_sge;
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+ qp->rq.size;
+ qp->rq.rwq = mmap(NULL, size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ pd->context->cmd_fd, resp.offset);
+ if ((void *) qp->rq.rwq == MAP_FAILED) {
+ free(qp);
+ return NULL;
+ }
+ }
+
+ pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE);
+ return &qp->ibv_qp;
}
int ipath_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
@@ -211,47 +308,152 @@
return ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);
}
-int ipath_destroy_qp(struct ibv_qp *qp)
+int ipath_destroy_qp(struct ibv_qp *ibqp)
{
+ struct ipath_qp *qp = to_iqp(ibqp);
int ret;
- ret = ibv_cmd_destroy_qp(qp);
+ ret = ibv_cmd_destroy_qp(ibqp);
if (ret)
return ret;
+ if (qp->rq.rwq) {
+ size_t size;
+
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+ qp->rq.size;
+ (void) munmap(qp->rq.rwq, size);
+ }
free(qp);
return 0;
}
+static int post_recv(struct ipath_rq *rq, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr)
+{
+ struct ibv_recv_wr *i;
+ struct ipath_rwq *rwq;
+ struct ipath_rwqe *wqe;
+ uint32_t head;
+ int n, ret;
+
+ pthread_spin_lock(&rq->lock);
+ rwq = rq->rwq;
+ head = rwq->head;
+ for (i = wr; i; i = i->next) {
+ if ((unsigned) i->num_sge > rq->max_sge)
+ goto bad;
+ wqe = get_rwqe_ptr(rq, head);
+ if (++head >= rq->size)
+ head = 0;
+ if (head == rwq->tail)
+ goto bad;
+ wqe->wr_id = i->wr_id;
+ wqe->num_sge = i->num_sge;
+ for (n = 0; n < wqe->num_sge; n++)
+ wqe->sg_list[n] = i->sg_list[n];
+ rwq->head = head;
+ }
+ ret = 0;
+ goto done;
+
+bad:
+ ret = -ENOMEM;
+ if (bad_wr)
+ *bad_wr = i;
+done:
+ pthread_spin_unlock(&rq->lock);
+ return ret;
+}
+
+int ipath_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr)
+{
+ struct ipath_qp *qp = to_iqp(ibqp);
+
+ return post_recv(&qp->rq, wr, bad_wr);
+}
+
struct ibv_srq *ipath_create_srq(struct ibv_pd *pd,
struct ibv_srq_init_attr *attr)
{
- struct ibv_srq *srq;
+ struct ipath_srq *srq;
struct ibv_create_srq cmd;
- struct ibv_create_srq_resp resp;
+ struct ipath_create_srq_resp resp;
int ret;
+ size_t size;
srq = malloc(sizeof *srq);
if (srq == NULL)
return NULL;
- ret = ibv_cmd_create_srq(pd, srq, attr, &cmd, sizeof cmd,
- &resp, sizeof resp);
+ ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr, &cmd, sizeof cmd,
+ &resp.ibv_resp, sizeof resp);
if (ret) {
free(srq);
return NULL;
}
- return srq;
+ srq->rq.size = attr->attr.max_wr + 1;
+ srq->rq.max_sge = attr->attr.max_sge;
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
+ srq->rq.rwq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pd->context->cmd_fd, resp.offset);
+ if ((void *) srq->rq.rwq == MAP_FAILED) {
+ free(srq);
+ return NULL;
+ }
+
+ pthread_spin_init(&srq->rq.lock, PTHREAD_PROCESS_PRIVATE);
+ return &srq->ibv_srq;
}
-int ipath_modify_srq(struct ibv_srq *srq,
+int ipath_modify_srq(struct ibv_srq *ibsrq,
struct ibv_srq_attr *attr,
enum ibv_srq_attr_mask attr_mask)
{
- struct ibv_modify_srq cmd;
+ struct ipath_srq *srq = to_isrq(ibsrq);
+ struct ipath_modify_srq_cmd cmd;
+ __u64 offset;
+ size_t size;
+ int ret;
- return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
+ if (attr_mask & IBV_SRQ_MAX_WR) {
+ pthread_spin_lock(&srq->rq.lock);
+ /* Save the old size so we can unmap the queue. */
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * srq->rq.max_sge)) *
+ srq->rq.size;
+ }
+ cmd.offset_addr = (__u64) &offset;
+ ret = ibv_cmd_modify_srq(ibsrq, attr, attr_mask,
+ &cmd.ibv_cmd, sizeof cmd);
+ if (ret) {
+ if (attr_mask & IBV_SRQ_MAX_WR)
+ pthread_spin_unlock(&srq->rq.lock);
+ return ret;
+ }
+ if (attr_mask & IBV_SRQ_MAX_WR) {
+ (void) munmap(srq->rq.rwq, size);
+ srq->rq.size = attr->max_wr + 1;
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * srq->rq.max_sge)) *
+ srq->rq.size;
+ srq->rq.rwq = mmap(NULL, size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ ibsrq->context->cmd_fd, offset);
+ pthread_spin_unlock(&srq->rq.lock);
+ /* XXX Now we have no receive queue. */
+ if ((void *) srq->rq.rwq == MAP_FAILED)
+ return errno;
+ }
+ return 0;
}
int ipath_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr)
@@ -261,18 +463,32 @@
return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
}
-int ipath_destroy_srq(struct ibv_srq *srq)
+int ipath_destroy_srq(struct ibv_srq *ibsrq)
{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+ size_t size;
int ret;
- ret = ibv_cmd_destroy_srq(srq);
+ ret = ibv_cmd_destroy_srq(ibsrq);
if (ret)
return ret;
+ size = sizeof(struct ipath_rwq) +
+ (sizeof(struct ipath_rwqe) +
+ (sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
+ (void) munmap(srq->rq.rwq, size);
free(srq);
return 0;
}
+int ipath_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr)
+{
+ struct ipath_srq *srq = to_isrq(ibsrq);
+
+ return post_recv(&srq->rq, wr, bad_wr);
+}
+
struct ibv_ah *ipath_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
struct ibv_ah *ah;
Index: src/userspace/libipathverbs/src/ipath-abi.h
===================================================================
--- src/userspace/libipathverbs/src/ipath-abi.h (revision 0)
+++ src/userspace/libipathverbs/src/ipath-abi.h (revision 0)
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Patent licenses, if any, provided herein do not apply to
+ * combinations of this program with other software, or any other
+ * product whatsoever.
+ */
+
+#ifndef IPATH_ABI_H
+#define IPATH_ABI_H
+
+#include <infiniband/kern-abi.h>
+
+struct ipath_get_context_resp {
+ struct ibv_get_context_resp ibv_resp;
+ __u32 version;
+};
+
+struct ipath_create_cq_resp {
+ struct ibv_create_cq_resp ibv_resp;
+ __u64 offset;
+};
+
+struct ipath_resize_cq_resp {
+ struct ibv_resize_cq_resp ibv_resp;
+ __u64 offset;
+};
+
+struct ipath_create_qp_resp {
+ struct ibv_create_qp_resp ibv_resp;
+ __u64 offset;
+};
+
+struct ipath_create_srq_resp {
+ struct ibv_create_srq_resp ibv_resp;
+ __u64 offset;
+};
+
+struct ipath_modify_srq_cmd {
+ struct ibv_modify_srq ibv_cmd;
+ __u64 offset_addr;
+};
+
+#endif /* IPATH_ABI_H */
Index: src/userspace/libipathverbs/src/ipathverbs.c
===================================================================
--- src/userspace/libipathverbs/src/ipathverbs.c (revision 8843)
+++ src/userspace/libipathverbs/src/ipathverbs.c (working copy)
@@ -1,4 +1,5 @@
/*
+ * Copyright (C) 2006 QLogic Corporation, All rights reserved.
* Copyright (c) 2005. PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -43,6 +44,7 @@
#include <unistd.h>
#include "ipathverbs.h"
+#include "ipath-abi.h"
#ifndef PCI_VENDOR_ID_PATHSCALE
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
@@ -86,22 +88,25 @@
.dereg_mr = ipath_dereg_mr,
.create_cq = ipath_create_cq,
- .poll_cq = ibv_cmd_poll_cq,
+ .poll_cq = ipath_poll_cq,
.req_notify_cq = ibv_cmd_req_notify_cq,
.cq_event = NULL,
+ .resize_cq = ipath_resize_cq,
.destroy_cq = ipath_destroy_cq,
.create_srq = ipath_create_srq,
.modify_srq = ipath_modify_srq,
+ .query_srq = ipath_query_srq,
.destroy_srq = ipath_destroy_srq,
- .post_srq_recv = ibv_cmd_post_srq_recv,
+ .post_srq_recv = ipath_post_srq_recv,
.create_qp = ipath_create_qp,
+ .query_qp = ipath_query_qp,
.modify_qp = ipath_modify_qp,
.destroy_qp = ipath_destroy_qp,
.post_send = ibv_cmd_post_send,
- .post_recv = ibv_cmd_post_recv,
+ .post_recv = ipath_post_recv,
.create_ah = ipath_create_ah,
.destroy_ah = ipath_destroy_ah,
@@ -116,6 +121,7 @@
struct ipath_context *context;
struct ibv_get_context cmd;
struct ibv_get_context_resp resp;
+ struct ipath_device *dev;
context = malloc(sizeof *context);
if (!context)
@@ -126,6 +132,12 @@
goto err_free;
context->ibv_ctx.ops = ipath_ctx_ops;
+ dev = to_idev(ibdev);
+ if (dev->abi_version == 1) {
+ context->ibv_ctx.ops.poll_cq = ibv_cmd_poll_cq;
+ context->ibv_ctx.ops.post_srq_recv = ibv_cmd_post_srq_recv;
+ context->ibv_ctx.ops.post_recv = ibv_cmd_post_recv;
+ }
return &context->ibv_ctx;
err_free:
@@ -180,6 +192,7 @@
dev->ibv_dev.ops = ipath_dev_ops;
dev->hca_type = hca_table[i].type;
+ dev->abi_version = abi_version;
return &dev->ibv_dev;
}
Index: src/userspace/libipathverbs/src/ipathverbs.h
===================================================================
--- src/userspace/libipathverbs/src/ipathverbs.h (revision 8843)
+++ src/userspace/libipathverbs/src/ipathverbs.h (working copy)
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2005. PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -39,6 +40,7 @@
#include <endian.h>
#include <byteswap.h>
+#include <pthread.h>
#include <infiniband/driver.h>
#include <infiniband/arch.h>
@@ -57,12 +59,88 @@
struct ipath_device {
struct ibv_device ibv_dev;
enum ipath_hca_type hca_type;
+ int abi_version;
};
struct ipath_context {
struct ibv_context ibv_ctx;
};
+/*
+ * This structure needs to have the same size and offsets as
+ * the kernel's ib_wc structure since it is memory mapped.
+ */
+struct ipath_wc {
+ uint64_t wr_id;
+ enum ibv_wc_status status;
+ enum ibv_wc_opcode opcode;
+ uint32_t vendor_err;
+ uint32_t byte_len;
+ uint32_t imm_data; /* in network byte order */
+ uint32_t qp_num;
+ uint32_t src_qp;
+ enum ibv_wc_flags wc_flags;
+ uint16_t pkey_index;
+ uint16_t slid;
+ uint8_t sl;
+ uint8_t dlid_path_bits;
+ uint8_t port_num;
+};
+
+struct ipath_cq_wc {
+ uint32_t head;
+ uint32_t tail;
+ struct ipath_wc queue[1];
+};
+
+struct ipath_cq {
+ struct ibv_cq ibv_cq;
+ struct ipath_cq_wc *queue;
+ pthread_spinlock_t lock;
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->r_max_sge.
+ */
+struct ipath_rwqe {
+ uint64_t wr_id;
+ uint8_t num_sge;
+ struct ibv_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct ipath_rwq {
+ uint32_t head; /* new requests posted to the head */
+ uint32_t tail; /* receives pull requests from here. */
+ struct ipath_rwqe wq[0];
+};
+
+struct ipath_rq {
+ struct ipath_rwq *rwq;
+ pthread_spinlock_t lock;
+ uint32_t size;
+ uint32_t max_sge;
+};
+
+struct ipath_qp {
+ struct ibv_qp ibv_qp;
+ struct ipath_rq rq;
+};
+
+struct ipath_srq {
+ struct ibv_srq ibv_srq;
+ struct ipath_rq rq;
+};
+
#define to_ixxx(xxx, type) \
((struct ipath_##type *) \
((void *) ib##xxx - offsetof(struct ipath_##type, ibv_##xxx)))
@@ -72,6 +150,39 @@
return to_ixxx(ctx, context);
}
+static inline struct ipath_device *to_idev(struct ibv_device *ibdev)
+{
+ return to_ixxx(dev, device);
+}
+
+static inline struct ipath_cq *to_icq(struct ibv_cq *ibcq)
+{
+ return to_ixxx(cq, cq);
+}
+
+static inline struct ipath_qp *to_iqp(struct ibv_qp *ibqp)
+{
+ return to_ixxx(qp, qp);
+}
+
+static inline struct ipath_srq *to_isrq(struct ibv_srq *ibsrq)
+{
+ return to_ixxx(srq, srq);
+}
+
+/*
+ * Since struct ipath_rwqe is not a fixed size, we can't simply index into
+ * struct ipath_rq.wq. This function does the array index computation.
+ */
+static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
+ unsigned n)
+{
+ return (struct ipath_rwqe *)
+ ((char *) rq->rwq->wq +
+ (sizeof(struct ipath_rwqe) +
+ rq->max_sge * sizeof(struct ibv_sge)) * n);
+}
+
extern int ipath_query_device(struct ibv_context *context,
struct ibv_device_attr *attr);
@@ -91,8 +202,12 @@
struct ibv_comp_channel *channel,
int comp_vector);
+int ipath_resize_cq(struct ibv_cq *cq, int cqe);
+
int ipath_destroy_cq(struct ibv_cq *cq);
+int ipath_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+
struct ibv_qp *ipath_create_qp(struct ibv_pd *pd,
struct ibv_qp_init_attr *attr);
@@ -122,6 +237,9 @@
int ipath_destroy_srq(struct ibv_srq *srq);
+int ipath_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
+ struct ibv_recv_wr **bad_wr);
+
struct ibv_ah *ipath_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);
int ipath_destroy_ah(struct ibv_ah *ah);
More information about the general
mailing list