[openib-general] Patch for review: ipath mmaped CQs, QPs, SRQs [1 of 2]

Ralph Campbell ralphc at pathscale.com
Thu Jun 15 15:40:54 PDT 2006


Here are the diffs Roland requested for the ipath driver changes
to mmap the completion and receive queues into the user library.
This isn't quite the final version though since I need to implement
QP receive queue resizing and some version checking/handling.


Index: src/userspace/libipathverbs/src/verbs.c
===================================================================
--- src/userspace/libipathverbs/src/verbs.c	(revision 8021)
+++ src/userspace/libipathverbs/src/verbs.c	(working copy)
@@ -40,11 +40,14 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <strings.h>
+#include <string.h>
 #include <pthread.h>
 #include <netinet/in.h>
+#include <sys/mman.h>
+#include <errno.h>
 
 #include "ipathverbs.h"
+#include "ipath-abi.h"
 
 int ipath_query_device(struct ibv_context *context,
 		       struct ibv_device_attr *attr)
@@ -83,11 +86,11 @@
 	struct ibv_pd		 *pd;
 
 	pd = malloc(sizeof *pd);
-	if(!pd)
+	if (!pd)
 		return NULL;
 
-	if(ibv_cmd_alloc_pd(context, pd, &cmd, sizeof cmd,
-			    &resp, sizeof resp)) {
+	if (ibv_cmd_alloc_pd(context, pd, &cmd, sizeof cmd,
+			     &resp, sizeof resp)) {
 		free(pd);
 		return NULL;
 	}
@@ -142,129 +145,396 @@
 			       struct ibv_comp_channel *channel,
 			       int comp_vector)
 {
-	struct ibv_cq		 *cq;
-	struct ibv_create_cq	  cmd;
-	struct ibv_create_cq_resp resp;
-	int			  ret;
+	struct ipath_cq		   *cq;
+	struct ibv_create_cq	    cmd;
+	struct ipath_create_cq_resp resp;
+	int			    ret;
+	size_t			    size;
 
 	cq = malloc(sizeof *cq);
 	if (!cq)
 		return NULL;
 
-	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector, cq,
-				&cmd, sizeof cmd, &resp, sizeof resp);
+	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
+				&cq->ibv_cq, &cmd, sizeof cmd,
+				&resp.ibv_resp, sizeof resp);
 	if (ret) {
 		free(cq);
 		return NULL;
 	}
 
-	return cq;
+	size = sizeof(struct ipath_cq_wc) + sizeof(struct ipath_wc) * cqe;
+	cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+			 context->cmd_fd, resp.offset);
+	if ((void *) cq->queue == MAP_FAILED) {
+		free(cq);
+		return NULL;
+	}
+
+	pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE);
+	return &cq->ibv_cq;
 }
 
-int ipath_destroy_cq(struct ibv_cq *cq)
+int ipath_resize_cq(struct ibv_cq *ibcq, int cqe)
 {
+	struct ipath_cq		       *cq = to_icq(ibcq);
+	struct ibv_resize_cq		cmd;
+	struct ipath_resize_cq_resp	resp;
+	size_t				size;
+	int				ret;
+
+	pthread_spin_lock(&cq->lock);
+	/* Unmap the old queue so we can resize it. */
+	size = sizeof(struct ipath_cq_wc) +
+		(sizeof(struct ipath_wc) * cq->ibv_cq.cqe);
+	(void) munmap(cq->queue, size);
+	ret = ibv_cmd_resize_cq_resp(ibcq, cqe, &cmd, sizeof cmd,
+				     &resp.ibv_resp, sizeof resp);
+	if (ret) {
+		pthread_spin_unlock(&cq->lock);
+		return ret;
+	}
+	size = sizeof(struct ipath_cq_wc) +
+		(sizeof(struct ipath_wc) * cq->ibv_cq.cqe);
+	cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+			 ibcq->context->cmd_fd, resp.offset);
+	ret = errno;
+	pthread_spin_unlock(&cq->lock);
+	if ((void *) cq->queue == MAP_FAILED)
+		return ret;
+	return 0;
+}
+
+int ipath_destroy_cq(struct ibv_cq *ibcq)
+{
+	struct ipath_cq *cq = to_icq(ibcq);
 	int ret;
 
-	ret = ibv_cmd_destroy_cq(cq);
+	ret = ibv_cmd_destroy_cq(ibcq);
 	if (ret)
 		return ret;
 
+	(void) munmap(cq->queue, sizeof(struct ipath_cq_wc) +
+				 (sizeof(struct ipath_wc) * cq->ibv_cq.cqe));
 	free(cq);
 	return 0;
 }
 
+int ipath_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
+{
+	struct ipath_cq *cq = to_icq(ibcq);
+	struct ipath_cq_wc *q;
+	int npolled;
+	uint32_t tail;
+
+	pthread_spin_lock(&cq->lock);
+	q = cq->queue;
+	tail = q->tail;
+	for (npolled = 0; npolled < ne; ++npolled, ++wc) {
+		if (tail == q->head)
+			break;
+		memcpy(wc, &q->queue[tail], sizeof(*wc));
+		if (tail == cq->ibv_cq.cqe)
+			tail = 0;
+		else
+			tail++;
+	}
+	q->tail = tail;
+	pthread_spin_unlock(&cq->lock);
+
+	return npolled;
+}
+
 struct ibv_qp *ipath_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
 {
-	struct ibv_create_qp	  cmd;
-	struct ibv_create_qp_resp resp;
-	struct ibv_qp		 *qp;
-	int			  ret;
+	struct ibv_create_qp	     cmd;
+	struct ipath_create_qp_resp  resp;
+	struct ipath_qp		    *qp;
+	int			     ret;
+	size_t			     size;
 
 	qp = malloc(sizeof *qp);
 	if (!qp)
 		return NULL;
 
-	ret = ibv_cmd_create_qp(pd, qp, attr, &cmd, sizeof cmd, &resp, sizeof resp);
+	ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd, sizeof cmd,
+				&resp.ibv_resp, sizeof resp);
 	if (ret) {
 		free(qp);
 		return NULL;
 	}
 
-	return qp;
+	if (attr->srq) {
+		qp->rq.size = 0;
+		qp->rq.max_sge = 0;
+		qp->rq.rwq = NULL;
+	} else {
+		qp->rq.size = attr->cap.max_recv_wr + 1;
+		qp->rq.max_sge = attr->cap.max_recv_sge;
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+			qp->rq.size;
+		qp->rq.rwq = mmap(NULL, size,
+				  PROT_READ | PROT_WRITE, MAP_SHARED,
+				  pd->context->cmd_fd, resp.offset);
+		if ((void *) qp->rq.rwq == MAP_FAILED) {
+			free(qp);
+			return NULL;
+		}
+	}
+
+	pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE);
+	return &qp->ibv_qp;
 }
 
-int ipath_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+int ipath_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+		   enum ibv_qp_attr_mask attr_mask,
+		   struct ibv_qp_init_attr *init_attr)
+{
+	struct ibv_query_qp cmd;
+
+	return ibv_cmd_query_qp(qp, attr, attr_mask, init_attr,
+				&cmd, sizeof cmd);
+}
+
+int ipath_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
 		    enum ibv_qp_attr_mask attr_mask)
 {
-	struct ibv_modify_qp cmd;
+	struct ipath_qp	           *qp = to_iqp(ibqp);
+	struct ipath_modify_qp_cmd  cmd;
+	__u64                       offset;
+	size_t                      size;
+	int                         ret;
 
-	return ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);
+	if (attr_mask & IBV_QP_CAP) {
+		/* Can't resize receive queue if we have a shared one. */
+		if (ibqp->srq)
+			return EINVAL;
+		pthread_spin_lock(&qp->rq.lock);
+		/* Unmap the old queue so we can resize it. */
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+			qp->rq.size;
+		(void) munmap(qp->rq.rwq, size);
+	}
+	cmd.offset_addr = (__u64) &offset;
+	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask,
+				&cmd.ibv_cmd, sizeof cmd);
+	if (ret) {
+		if (attr_mask & IBV_QP_CAP)
+			pthread_spin_unlock(&qp->rq.lock);
+		return ret;
+	}
+	if (attr_mask & IBV_QP_CAP) {
+		qp->rq.size = attr->cap.max_recv_wr + 1;
+		qp->rq.max_sge = attr->cap.max_recv_sge;
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+			qp->rq.size;
+		qp->rq.rwq = mmap(NULL, size,
+				  PROT_READ | PROT_WRITE, MAP_SHARED,
+				  ibqp->context->cmd_fd, offset);
+		pthread_spin_unlock(&qp->rq.lock);
+		/* XXX Now we have no receive queue. */
+		if ((void *) qp->rq.rwq == MAP_FAILED)
+			return errno;
+	}
+	return 0;
 }
 
-int ipath_destroy_qp(struct ibv_qp *qp)
+int ipath_destroy_qp(struct ibv_qp *ibqp)
 {
+	struct ipath_qp	*qp = to_iqp(ibqp);
 	int ret;
 
-	ret = ibv_cmd_destroy_qp(qp);
+	ret = ibv_cmd_destroy_qp(ibqp);
 	if (ret)
 		return ret;
 
+	if (qp->rq.rwq) {
+		size_t size;
+
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * qp->rq.max_sge)) *
+			qp->rq.size;
+		(void) munmap(qp->rq.rwq, size);
+	}
 	free(qp);
 	return 0;
 }
 
+static int post_recv(struct ipath_rq *rq, struct ibv_recv_wr *wr,
+		     struct ibv_recv_wr **bad_wr)
+{
+	struct ibv_recv_wr *i;
+	struct ipath_rwq *rwq;
+	struct ipath_rwqe *wqe;
+	uint32_t head;
+	int n, ret;
+
+	pthread_spin_lock(&rq->lock);
+	rwq = rq->rwq;
+	head = rwq->head;
+	for (i = wr; i; i = i->next) {
+		if ((unsigned) i->num_sge > rq->max_sge)
+			goto bad;
+		wqe = get_rwqe_ptr(rq, head);
+		if (++head >= rq->size)
+			head = 0;
+		if (head == rwq->tail)
+			goto bad;
+		wqe->wr_id = i->wr_id;
+		wqe->num_sge = i->num_sge;
+		for (n = 0; n < wqe->num_sge; n++)
+			wqe->sg_list[n] = i->sg_list[n];
+		rwq->head = head;
+	}
+	ret = 0;
+	goto done;
+
+bad:
+	ret = -ENOMEM;
+	if (bad_wr)
+		*bad_wr = i;
+done:
+	pthread_spin_unlock(&rq->lock);
+	return ret;
+}
+
+int ipath_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+		    struct ibv_recv_wr **bad_wr)
+{
+	struct ipath_qp *qp = to_iqp(ibqp);
+
+	return post_recv(&qp->rq, wr, bad_wr);
+}
+
 struct ibv_srq *ipath_create_srq(struct ibv_pd *pd,
 				 struct ibv_srq_init_attr *attr)
 {
-	struct ibv_srq *srq;
+	struct ipath_srq *srq;
 	struct ibv_create_srq cmd;
-	struct ibv_create_srq_resp resp;
+	struct ipath_create_srq_resp resp;
 	int ret;
+	size_t size;
 
 	srq = malloc(sizeof *srq);
-	if(srq == NULL)
+	if (srq == NULL)
 		return NULL;
 
-	ret = ibv_cmd_create_srq(pd, srq, attr, &cmd, sizeof cmd,
-		&resp, sizeof resp);
+	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr, &cmd, sizeof cmd,
+				 &resp.ibv_resp, sizeof resp);
 	if (ret) {
 		free(srq);
 		return NULL;
 	}
 
-	return srq;
+	srq->rq.size = attr->attr.max_wr + 1;
+	srq->rq.max_sge = attr->attr.max_sge;
+	size = sizeof(struct ipath_rwq) +
+		(sizeof(struct ipath_rwqe) +
+		 (sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
+	srq->rq.rwq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+			   pd->context->cmd_fd, resp.offset);
+	if ((void *) srq->rq.rwq == MAP_FAILED) {
+		free(srq);
+		return NULL;
+	}
+
+	pthread_spin_init(&srq->rq.lock, PTHREAD_PROCESS_PRIVATE);
+	return &srq->ibv_srq;
 }
 
-int ipath_modify_srq(struct ibv_srq *srq,
+int ipath_modify_srq(struct ibv_srq *ibsrq,
 		     struct ibv_srq_attr *attr, 
 		     enum ibv_srq_attr_mask attr_mask)
 {
-	struct ibv_modify_srq cmd;
+	struct ipath_srq            *srq = to_isrq(ibsrq);
+	struct ipath_modify_srq_cmd  cmd;
+	__u64                        offset;
+	size_t                       size;
+	int                          ret;
 
-	return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
+	if (attr_mask & IBV_SRQ_MAX_WR) {
+		pthread_spin_lock(&srq->rq.lock);
+		/* Unmap the old queue so we can resize it. */
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * srq->rq.max_sge)) *
+			srq->rq.size;
+		(void) munmap(srq->rq.rwq, size);
+	}
+	cmd.offset_addr = (__u64) &offset;
+	ret = ibv_cmd_modify_srq(ibsrq, attr, attr_mask,
+				 &cmd.ibv_cmd, sizeof cmd);
+	if (ret) {
+		if (attr_mask & IBV_SRQ_MAX_WR)
+			pthread_spin_unlock(&srq->rq.lock);
+		return ret;
+	}
+	if (attr_mask & IBV_SRQ_MAX_WR) {
+		srq->rq.size = attr->max_wr + 1;
+		size = sizeof(struct ipath_rwq) +
+			(sizeof(struct ipath_rwqe) +
+			 (sizeof(struct ibv_sge) * srq->rq.max_sge)) *
+			srq->rq.size;
+		srq->rq.rwq = mmap(NULL, size,
+				   PROT_READ | PROT_WRITE, MAP_SHARED,
+				   ibsrq->context->cmd_fd, offset);
+		pthread_spin_unlock(&srq->rq.lock);
+		/* XXX Now we have no receive queue. */
+		if ((void *) srq->rq.rwq == MAP_FAILED)
+			return errno;
+	}
+	return 0;
 }
 
-int ipath_destroy_srq(struct ibv_srq *srq)
+int ipath_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr)
 {
+	struct ibv_query_srq cmd;
+
+	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
+}
+
+int ipath_destroy_srq(struct ibv_srq *ibsrq)
+{
+	struct ipath_srq *srq = to_isrq(ibsrq);
+	size_t size;
 	int ret;
 
-	ret = ibv_cmd_destroy_srq(srq);
+	ret = ibv_cmd_destroy_srq(ibsrq);
 	if (ret)
 		return ret;
 
+	size = sizeof(struct ipath_rwq) +
+		(sizeof(struct ipath_rwqe) +
+		 (sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
+	(void) munmap(srq->rq.rwq, size);
 	free(srq);
 	return 0;
 }
 
+int ipath_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
+			struct ibv_recv_wr **bad_wr)
+{
+	struct ipath_srq *srq = to_isrq(ibsrq);
+
+	return post_recv(&srq->rq, wr, bad_wr);
+}
+
 struct ibv_ah *ipath_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
 {
 	struct ibv_ah *ah;
 
 	ah = malloc(sizeof *ah);
-	if(ah == NULL)
+	if (ah == NULL)
 		return NULL;
 
-	if(ibv_cmd_create_ah(pd, ah, attr)) {
+	if (ibv_cmd_create_ah(pd, ah, attr)) {
 		free(ah);
 		return NULL;
 	}
Index: src/userspace/libipathverbs/src/ipath-abi.h
===================================================================
--- src/userspace/libipathverbs/src/ipath-abi.h	(revision 0)
+++ src/userspace/libipathverbs/src/ipath-abi.h	(revision 0)
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006. PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Patent licenses, if any, provided herein do not apply to
+ * combinations of this program with other software, or any other
+ * product whatsoever.
+ */
+
+#ifndef IPATH_ABI_H
+#define IPATH_ABI_H
+
+#include <infiniband/kern-abi.h>
+
+struct ipath_create_cq_resp {
+	struct ibv_create_cq_resp	ibv_resp;
+	__u64				offset;
+};
+
+struct ipath_resize_cq_resp {
+	struct ibv_resize_cq_resp	ibv_resp;
+	__u64				offset;
+};
+
+struct ipath_create_qp_resp {
+	struct ibv_create_qp_resp	ibv_resp;
+	__u64				offset;
+};
+
+struct ipath_modify_qp_cmd {
+	struct ibv_modify_qp		ibv_cmd;
+	__u64				offset_addr;
+};
+
+struct ipath_create_srq_resp {
+	struct ibv_create_srq_resp	ibv_resp;
+	__u64				offset;
+};
+
+struct ipath_modify_srq_cmd {
+	struct ibv_modify_srq		ibv_cmd;
+	__u64				offset_addr;
+};
+
+#endif /* IPATH_ABI_H */
Index: src/userspace/libipathverbs/src/ipathverbs.c
===================================================================
--- src/userspace/libipathverbs/src/ipathverbs.c	(revision 8021)
+++ src/userspace/libipathverbs/src/ipathverbs.c	(working copy)
@@ -86,22 +86,25 @@
 	.dereg_mr	= ipath_dereg_mr,
 
 	.create_cq	= ipath_create_cq,
-	.poll_cq	= ibv_cmd_poll_cq,
+	.poll_cq	= ipath_poll_cq,
 	.req_notify_cq	= ibv_cmd_req_notify_cq,
 	.cq_event	= NULL,
+	.resize_cq	= ipath_resize_cq,
 	.destroy_cq	= ipath_destroy_cq,
 
 	.create_srq	= ipath_create_srq,
 	.modify_srq	= ipath_modify_srq,
+	.query_srq	= ipath_query_srq,
 	.destroy_srq	= ipath_destroy_srq,
-	.post_srq_recv	= ibv_cmd_post_srq_recv,
+	.post_srq_recv	= ipath_post_srq_recv,
 
 	.create_qp	= ipath_create_qp,
+	.query_qp	= ipath_query_qp,
 	.modify_qp	= ipath_modify_qp,
 	.destroy_qp	= ipath_destroy_qp,
 
 	.post_send	= ibv_cmd_post_send,
-	.post_recv	= ibv_cmd_post_recv,
+	.post_recv	= ipath_post_recv,
 
 	.create_ah	= ipath_create_ah,
 	.destroy_ah	= ipath_destroy_ah,
Index: src/userspace/libipathverbs/src/ipathverbs.h
===================================================================
--- src/userspace/libipathverbs/src/ipathverbs.h	(revision 8021)
+++ src/userspace/libipathverbs/src/ipathverbs.h	(working copy)
@@ -39,6 +39,7 @@
 
 #include <endian.h>
 #include <byteswap.h>
+#include <pthread.h>
 
 #include <infiniband/driver.h>
 #include <infiniband/arch.h>
@@ -64,6 +65,81 @@
 	struct ibv_context	ibv_ctx;
 };
 
+/*
+ * This structure needs to have the same size and offsets as
+ * the kernel's ib_wc structure since it is memory mapped.
+ */
+struct ipath_wc {
+	uint64_t		wr_id;
+	enum ibv_wc_status	status;
+	enum ibv_wc_opcode	opcode;
+	uint32_t		vendor_err;
+	uint32_t		byte_len;
+	uint32_t		imm_data;	/* in network byte order */
+	uint32_t		qp_num;
+	uint32_t		src_qp;
+	enum ibv_wc_flags	wc_flags;
+	uint16_t		pkey_index;
+	uint16_t		slid;
+	uint8_t			sl;
+	uint8_t			dlid_path_bits;
+	uint8_t			port_num;
+};
+
+struct ipath_cq_wc {
+	uint32_t		head;
+	uint32_t		tail;
+	struct ipath_wc		queue[1];
+};
+
+struct ipath_cq {
+	struct ibv_cq		ibv_cq;
+	struct ipath_cq_wc	*queue;
+	pthread_spinlock_t	lock;
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in the max_sge field of struct ipath_rq.
+ */
+struct ipath_rwqe {
+	uint64_t		wr_id;
+	uint8_t			num_sge;
+	struct ibv_sge		sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct ipath_rwq {
+	uint32_t		head;	/* new requests posted to the head */
+	uint32_t		tail;	/* receives pull requests from here. */
+	struct ipath_rwqe	wq[0];
+};
+
+struct ipath_rq {
+	struct ipath_rwq       *rwq;
+	pthread_spinlock_t	lock;
+	uint32_t		size;
+	uint32_t		max_sge;
+};
+
+struct ipath_qp {
+	struct ibv_qp		ibv_qp;
+	struct ipath_rq		rq;
+};
+
+struct ipath_srq {
+	struct ibv_srq		ibv_srq;
+	struct ipath_rq		rq;
+};
+
 #define to_ixxx(xxx, type)						\
 	((struct ipath_##type *)					\
 	 ((void *) ib##xxx - offsetof(struct ipath_##type, ibv_##xxx)))
@@ -73,6 +149,34 @@
 	return to_ixxx(ctx, context);
 }
 
+static inline struct ipath_cq *to_icq(struct ibv_cq *ibcq)
+{
+	return to_ixxx(cq, cq);
+}
+
+static inline struct ipath_qp *to_iqp(struct ibv_qp *ibqp)
+{
+	return to_ixxx(qp, qp);
+}
+
+static inline struct ipath_srq *to_isrq(struct ibv_srq *ibsrq)
+{
+	return to_ixxx(srq, srq);
+}
+
+/*
+ * Since struct ipath_rwqe is not a fixed size, we can't simply index into
+ * struct ipath_rq.wq.  This function does the array index computation.
+ */
+static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
+					      unsigned n)
+{
+	return (struct ipath_rwqe *)
+		((char *) rq->rwq->wq +
+		 (sizeof(struct ipath_rwqe) +
+		  rq->max_sge * sizeof(struct ibv_sge)) * n);
+}
+
 extern int ipath_query_device(struct ibv_context *context,
 			      struct ibv_device_attr *attr);
 
@@ -92,11 +196,19 @@
 			       struct ibv_comp_channel *channel,
 			       int comp_vector);
 
+int ipath_resize_cq(struct ibv_cq *cq, int cqe);
+
 int ipath_destroy_cq(struct ibv_cq *cq);
 
+int ipath_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+
 struct ibv_qp *ipath_create_qp(struct ibv_pd *pd,
 			       struct ibv_qp_init_attr *attr);
 
+int ipath_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+		   enum ibv_qp_attr_mask attr_mask,
+		   struct ibv_qp_init_attr *init_attr);
+
 int ipath_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
 		    enum ibv_qp_attr_mask attr_mask);
 
@@ -115,8 +227,12 @@
 		     struct ibv_srq_attr *attr, 
 		     enum ibv_srq_attr_mask attr_mask);
 
+int ipath_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr);
+
 int ipath_destroy_srq(struct ibv_srq *srq);
 
+int ipath_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
+			struct ibv_recv_wr **bad_wr);
 
 struct ibv_ah *ipath_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);


-- 
Ralph Campbell <ralphc at pathscale.com>





More information about the general mailing list