[openib-general] [RFC] [PATCH 3/7] librdmaverbs hdr files
Krishna Kumar
krkumar2 at in.ibm.com
Mon Jul 10 03:12:34 PDT 2006
diff -ruNp ORG/librdmaverbs/include/rdma/arch.h NEW/librdmaverbs/include/rdma/arch.h
--- ORG/librdmaverbs/include/rdma/arch.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/arch.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: arch.h 6987 2006-05-08 15:18:51Z tom $
+ */
+
+#ifndef INFINIBAND_ARCH_H
+#define INFINIBAND_ARCH_H
+
+#include <stdint.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+static inline uint64_t htonll(uint64_t x) { return bswap_64(x); }
+static inline uint64_t ntohll(uint64_t x) { return bswap_64(x); }
+#elif __BYTE_ORDER == __BIG_ENDIAN
+static inline uint64_t htonll(uint64_t x) { return x; }
+static inline uint64_t ntohll(uint64_t x) { return x; }
+#else
+#error __BYTE_ORDER is neither __LITTLE_ENDIAN nor __BIG_ENDIAN
+#endif
+
+/*
+ * Architecture-specific defines. Currently, an architecture is
+ * required to implement the following operations:
+ *
+ * mb() - memory barrier. No loads or stores may be reordered across
+ * this macro by either the compiler or the CPU.
+ */
+
+#if defined(__i386__)
+
+#define mb() asm volatile("" ::: "memory")
+
+#elif defined(__x86_64__)
+
+#define mb() asm volatile("" ::: "memory")
+
+#elif defined(__PPC64__)
+
+#define mb() asm volatile("sync" ::: "memory")
+
+#elif defined(__ia64__)
+
+#define mb() asm volatile("mf" ::: "memory")
+
+#elif defined(__PPC__)
+
+#define mb() asm volatile("sync" ::: "memory")
+
+#elif defined(__sparc_v9__)
+
+#define mb() asm volatile("membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad" ::: "memory")
+
+#elif defined(__sparc__)
+
+#define mb() asm volatile("sync" ::: "memory")
+
+#else
+
+#warning No architecture-specific defines found. Using generic implementation.
+
+#define mb() asm volatile("" ::: "memory")
+
+#endif
+
+#endif /* INFINIBAND_ARCH_H */
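
A quick usage sketch of the two facilities arch.h provides (hedged: the
doorbell pointer and WQE layout below are illustrative assumptions, not
part of this patch). A driver byte-swaps 64-bit fields with htonll()
before placing them in a work queue entry, then uses mb() to make the
WQE globally visible before ringing the doorbell:

	#include <stdint.h>
	#include <rdma/arch.h>

	/* Illustrative only: real WQE layouts are device-specific. */
	static void post_example(volatile uint32_t *doorbell,
				 uint64_t *wqe_va, uint64_t remote_addr)
	{
		*wqe_va = htonll(remote_addr);	/* host -> big-endian wire order */
		mb();		/* order the WQE write before the doorbell write */
		*doorbell = 1;
	}
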
diff -ruNp ORG/librdmaverbs/include/rdma/driver.h NEW/librdmaverbs/include/rdma/driver.h
--- ORG/librdmaverbs/include/rdma/driver.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/driver.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: driver.h 7631 2006-06-02 19:53:25Z swise $
+ */
+
+#ifndef INFINIBAND_DRIVER_H
+#define INFINIBAND_DRIVER_H
+
+#include <rdma/verbs.h>
+#include <rdma/kern-abi.h>
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+/*
+ * Device-specific drivers should declare their device init function
+ * as below (the name must be "rdma_driver_init"):
+ *
+ * struct rdma_device *rdma_driver_init(const char *uverbs_sys_path,
+ * int abi_version);
+ *
+ * librdmaverbs will call each driver's rdma_driver_init() function once
+ * for each InfiniBand device. If the device is one that the driver
+ * can support, it should return a struct rdma_device * with the ops
+ * member filled in. If the driver does not support the device, it
+ * should return NULL from rdma_driver_init().
+ */
+
+typedef struct rdma_device *(*rdma_driver_init_func)(const char *, int);
+
+int rdma_cmd_get_context(struct rdma_context *context, struct rdma_get_context *cmd,
+ size_t cmd_size, struct rdma_get_context_resp *resp,
+ size_t resp_size);
+int rdma_cmd_query_device(struct rdma_context *context,
+ struct rdma_device_attr *device_attr,
+ uint64_t *raw_fw_ver,
+ struct rdma_query_device *cmd, size_t cmd_size);
+int rdma_cmd_query_port(struct rdma_context *context, uint8_t port_num,
+ struct rdma_port_attr *port_attr,
+ struct rdma_query_port *cmd, size_t cmd_size);
+int rdma_cmd_query_gid(struct rdma_context *context, uint8_t port_num,
+ int index, union rdma_gid *gid);
+int rdma_cmd_query_pkey(struct rdma_context *context, uint8_t port_num,
+ int index, uint16_t *pkey);
+int rdma_cmd_alloc_pd(struct rdma_context *context, struct rdma_pd *pd,
+ struct rdma_alloc_pd *cmd, size_t cmd_size,
+ struct rdma_alloc_pd_resp *resp, size_t resp_size);
+int rdma_cmd_dealloc_pd(struct rdma_pd *pd);
+int rdma_cmd_reg_mr(struct rdma_pd *pd, void *addr, size_t length,
+ uint64_t hca_va, enum rdma_access_flags access,
+ struct rdma_mr *mr, struct rdma_reg_mr *cmd,
+ size_t cmd_size);
+int rdma_cmd_dereg_mr(struct rdma_mr *mr);
+int rdma_cmd_create_cq(struct rdma_context *context, int cqe,
+ struct rdma_comp_channel *channel,
+ int comp_vector, struct rdma_cq *cq,
+ struct rdma_create_cq *cmd, size_t cmd_size,
+ struct rdma_create_cq_resp *resp, size_t resp_size);
+int rdma_cmd_poll_cq(struct rdma_cq *cq, int ne, struct rdma_wc *wc);
+int rdma_cmd_req_notify_cq(struct rdma_cq *cq, int solicited_only);
+int rdma_cmd_resize_cq(struct rdma_cq *cq, int cqe,
+ struct rdma_resize_cq *cmd, size_t cmd_size);
+int rdma_cmd_destroy_cq(struct rdma_cq *cq);
+
+int rdma_cmd_create_srq(struct rdma_pd *pd,
+ struct rdma_srq *srq, struct rdma_srq_init_attr *attr,
+ struct rdma_create_srq *cmd, size_t cmd_size,
+ struct rdma_create_srq_resp *resp, size_t resp_size);
+int rdma_cmd_modify_srq(struct rdma_srq *srq,
+ struct rdma_srq_attr *srq_attr,
+ enum rdma_srq_attr_mask srq_attr_mask,
+ struct rdma_modify_srq *cmd, size_t cmd_size);
+int rdma_cmd_query_srq(struct rdma_srq *srq,
+ struct rdma_srq_attr *srq_attr,
+ struct rdma_query_srq *cmd, size_t cmd_size);
+int rdma_cmd_destroy_srq(struct rdma_srq *srq);
+
+int rdma_cmd_create_qp(struct rdma_pd *pd,
+ struct rdma_qp *qp, struct rdma_qp_init_attr *attr,
+ struct rdma_create_qp *cmd, size_t cmd_size,
+ struct rdma_create_qp_resp *resp, size_t resp_size);
+int rdma_cmd_query_qp(struct rdma_qp *qp, struct rdma_qp_attr *qp_attr,
+ enum rdma_qp_attr_mask attr_mask,
+ struct rdma_qp_init_attr *qp_init_attr,
+ struct rdma_query_qp *cmd, size_t cmd_size);
+int rdma_cmd_modify_qp(struct rdma_qp *qp, struct rdma_qp_attr *attr,
+ enum rdma_qp_attr_mask attr_mask,
+ struct rdma_modify_qp *cmd, size_t cmd_size);
+int rdma_cmd_destroy_qp(struct rdma_qp *qp);
+int rdma_cmd_post_send(struct rdma_qp *ibqp, struct rdma_send_wr *wr,
+ struct rdma_send_wr **bad_wr);
+int rdma_cmd_post_recv(struct rdma_qp *ibqp, struct rdma_recv_wr *wr,
+ struct rdma_recv_wr **bad_wr);
+int rdma_cmd_post_srq_recv(struct rdma_srq *srq, struct rdma_recv_wr *wr,
+ struct rdma_recv_wr **bad_wr);
+int rdma_cmd_create_ah(struct rdma_pd *pd, struct rdma_ah *ah,
+ struct rdma_ah_attr *attr);
+int rdma_cmd_destroy_ah(struct rdma_ah *ah);
+int rdma_cmd_attach_mcast(struct rdma_qp *qp, union rdma_gid *gid, uint16_t lid);
+int rdma_cmd_detach_mcast(struct rdma_qp *qp, union rdma_gid *gid, uint16_t lid);
+
+/*
+ * sysfs helper functions
+ */
+const char *rdma_get_sysfs_path(void);
+
+int rdma_read_sysfs_file(const char *dir, const char *file,
+ char *buf, size_t size);
+
+#endif /* INFINIBAND_DRIVER_H */
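
For reference, a minimal sketch of a device-specific driver entry point
(hedged: the "acme" name, the sysfs lookup and the prefix match are
illustrative assumptions; only rdma_driver_init(), struct rdma_device
and rdma_read_sysfs_file() come from this header):

	#include <stdlib.h>
	#include <string.h>
	#include <rdma/driver.h>

	struct rdma_device *rdma_driver_init(const char *uverbs_sys_path,
					     int abi_version)
	{
		char value[16];
		struct rdma_device *dev;

		/* Identify the kernel device behind this uverbs node. */
		if (rdma_read_sysfs_file(uverbs_sys_path, "ibdev",
					 value, sizeof value) < 0)
			return NULL;

		if (strncmp(value, "acme", 4))	/* not our hardware */
			return NULL;

		dev = calloc(1, sizeof *dev);
		if (!dev)
			return NULL;

		/* Fill in dev->ops.alloc_context / free_context here. */
		return dev;
	}
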
diff -ruNp ORG/librdmaverbs/include/rdma/kern-abi.h NEW/librdmaverbs/include/rdma/kern-abi.h
--- ORG/librdmaverbs/include/rdma/kern-abi.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/kern-abi.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: kern-abi.h 5640 2006-03-06 21:36:57Z tom $
+ */
+
+#ifndef KERN_ABI_H
+#define KERN_ABI_H
+
+#include <linux/types.h>
+
+/*
+ * This file must be kept in sync with the kernel's version of
+ * drivers/infiniband/include/ib_user_verbs.h
+ */
+
+/*
+ * The minimum and maximum kernel ABI versions that we can handle.
+ */
+#define RDMA_USER_VERBS_MIN_ABI_VERSION 1
+#define RDMA_USER_VERBS_MAX_ABI_VERSION 6
+
+enum {
+ RDMA_USER_VERBS_CMD_GET_CONTEXT,
+ RDMA_USER_VERBS_CMD_QUERY_DEVICE,
+ RDMA_USER_VERBS_CMD_QUERY_PORT,
+ RDMA_USER_VERBS_CMD_ALLOC_PD,
+ RDMA_USER_VERBS_CMD_DEALLOC_PD,
+ RDMA_USER_VERBS_CMD_CREATE_AH,
+ RDMA_USER_VERBS_CMD_MODIFY_AH,
+ RDMA_USER_VERBS_CMD_QUERY_AH,
+ RDMA_USER_VERBS_CMD_DESTROY_AH,
+ RDMA_USER_VERBS_CMD_REG_MR,
+ RDMA_USER_VERBS_CMD_REG_SMR,
+ RDMA_USER_VERBS_CMD_REREG_MR,
+ RDMA_USER_VERBS_CMD_QUERY_MR,
+ RDMA_USER_VERBS_CMD_DEREG_MR,
+ RDMA_USER_VERBS_CMD_ALLOC_MW,
+ RDMA_USER_VERBS_CMD_BIND_MW,
+ RDMA_USER_VERBS_CMD_DEALLOC_MW,
+ RDMA_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
+ RDMA_USER_VERBS_CMD_CREATE_CQ,
+ RDMA_USER_VERBS_CMD_RESIZE_CQ,
+ RDMA_USER_VERBS_CMD_DESTROY_CQ,
+ RDMA_USER_VERBS_CMD_POLL_CQ,
+ RDMA_USER_VERBS_CMD_PEEK_CQ,
+ RDMA_USER_VERBS_CMD_REQ_NOTIFY_CQ,
+ RDMA_USER_VERBS_CMD_CREATE_QP,
+ RDMA_USER_VERBS_CMD_QUERY_QP,
+ RDMA_USER_VERBS_CMD_MODIFY_QP,
+ RDMA_USER_VERBS_CMD_DESTROY_QP,
+ RDMA_USER_VERBS_CMD_POST_SEND,
+ RDMA_USER_VERBS_CMD_POST_RECV,
+ RDMA_USER_VERBS_CMD_ATTACH_MCAST,
+ RDMA_USER_VERBS_CMD_DETACH_MCAST,
+ RDMA_USER_VERBS_CMD_CREATE_SRQ,
+ RDMA_USER_VERBS_CMD_MODIFY_SRQ,
+ RDMA_USER_VERBS_CMD_QUERY_SRQ,
+ RDMA_USER_VERBS_CMD_DESTROY_SRQ,
+ RDMA_USER_VERBS_CMD_POST_SRQ_RECV
+};
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * Specifically:
+ * - Do not use pointer types -- pass pointers in __u64 instead.
+ * - Make sure that any structure larger than 4 bytes is padded to a
+ * multiple of 8 bytes. Otherwise the structure size will be
+ * different between 32-bit and 64-bit architectures.
+ */
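+
+/*
+ * For example (illustrative), struct rdma_create_cq below ends with a
+ * __u32 reserved field so that the struct is 40 bytes -- a multiple of
+ * 8 -- and the trailing driver_data[] stays 8-byte aligned on both
+ * 32-bit and 64-bit architectures.
+ */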
+
+struct rdma_kern_async_event {
+ __u64 element;
+ __u32 event_type;
+ __u32 reserved;
+};
+
+struct rdma_comp_event {
+ __u64 cq_handle;
+};
+
+/*
+ * All commands from userspace should start with a __u32 command field
+ * followed by __u16 in_words and out_words fields (which give the
+ * length of the command block and response buffer if any in 32-bit
+ * words). The kernel driver will read these fields first and read
+ * the rest of the command struct based on these values.
+ */
+
+struct rdma_query_params {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+};
+
+struct rdma_query_params_resp {
+ __u32 num_cq_events;
+};
+
+struct rdma_get_context {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 driver_data[0];
+};
+
+struct rdma_get_context_resp {
+ __u32 async_fd;
+ __u32 num_comp_vectors;
+};
+
+struct rdma_query_device {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 driver_data[0];
+};
+
+struct rdma_query_device_resp {
+ __u64 fw_ver;
+ __u64 node_guid;
+ __u64 sys_image_guid;
+ __u64 max_mr_size;
+ __u64 page_size_cap;
+ __u32 vendor_id;
+ __u32 vendor_part_id;
+ __u32 hw_ver;
+ __u32 max_qp;
+ __u32 max_qp_wr;
+ __u32 device_cap_flags;
+ __u32 max_sge;
+ __u32 max_sge_rd;
+ __u32 max_cq;
+ __u32 max_cqe;
+ __u32 max_mr;
+ __u32 max_pd;
+ __u32 max_qp_rd_atom;
+ __u32 max_ee_rd_atom;
+ __u32 max_res_rd_atom;
+ __u32 max_qp_init_rd_atom;
+ __u32 max_ee_init_rd_atom;
+ __u32 atomic_cap;
+ __u32 max_ee;
+ __u32 max_rdd;
+ __u32 max_mw;
+ __u32 max_raw_ipv6_qp;
+ __u32 max_raw_ethy_qp;
+ __u32 max_mcast_grp;
+ __u32 max_mcast_qp_attach;
+ __u32 max_total_mcast_qp_attach;
+ __u32 max_ah;
+ __u32 max_fmr;
+ __u32 max_map_per_fmr;
+ __u32 max_srq;
+ __u32 max_srq_wr;
+ __u32 max_srq_sge;
+ __u16 max_pkeys;
+ __u8 local_ca_ack_delay;
+ __u8 phys_port_cnt;
+ __u8 reserved[4];
+};
+
+struct rdma_query_port {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u8 port_num;
+ __u8 reserved[7];
+ __u64 driver_data[0];
+};
+
+struct rdma_query_port_resp {
+ __u32 port_cap_flags;
+ __u32 max_msg_sz;
+ __u32 bad_pkey_cntr;
+ __u32 qkey_viol_cntr;
+ __u32 gid_tbl_len;
+ __u16 pkey_tbl_len;
+ __u16 lid;
+ __u16 sm_lid;
+ __u8 state;
+ __u8 max_mtu;
+ __u8 active_mtu;
+ __u8 lmc;
+ __u8 max_vl_num;
+ __u8 sm_sl;
+ __u8 subnet_timeout;
+ __u8 init_type_reply;
+ __u8 active_width;
+ __u8 active_speed;
+ __u8 phys_state;
+ __u8 reserved[3];
+};
+
+struct rdma_alloc_pd {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 driver_data[0];
+};
+
+struct rdma_alloc_pd_resp {
+ __u32 pd_handle;
+};
+
+struct rdma_dealloc_pd {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 pd_handle;
+};
+
+struct rdma_reg_mr {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 start;
+ __u64 length;
+ __u64 hca_va;
+ __u32 pd_handle;
+ __u32 access_flags;
+ __u64 driver_data[0];
+};
+
+struct rdma_reg_mr_resp {
+ __u32 mr_handle;
+ __u32 lkey;
+ __u32 rkey;
+};
+
+struct rdma_dereg_mr {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 mr_handle;
+};
+
+struct rdma_create_comp_channel {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+};
+
+struct rdma_create_comp_channel_resp {
+ __u32 fd;
+};
+
+struct rdma_create_cq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_cq_resp {
+ __u32 cq_handle;
+ __u32 cqe;
+};
+
+struct rdma_kern_wc {
+ __u64 wr_id;
+ __u32 status;
+ __u32 opcode;
+ __u32 vendor_err;
+ __u32 byte_len;
+ __u32 imm_data;
+ __u32 qp_num;
+ __u32 src_qp;
+ __u32 wc_flags;
+ __u16 pkey_index;
+ __u16 slid;
+ __u8 sl;
+ __u8 dlid_path_bits;
+ __u8 port_num;
+ __u8 reserved;
+};
+
+struct rdma_poll_cq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 cq_handle;
+ __u32 ne;
+};
+
+struct rdma_poll_cq_resp {
+ __u32 count;
+ __u32 reserved;
+ struct rdma_kern_wc wc[0];
+};
+
+struct rdma_req_notify_cq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 cq_handle;
+ __u32 solicited;
+};
+
+struct rdma_resize_cq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 cq_handle;
+ __u32 cqe;
+ __u64 driver_data[0];
+};
+
+struct rdma_resize_cq_resp {
+ __u32 cqe;
+};
+
+struct rdma_destroy_cq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 cq_handle;
+ __u32 reserved;
+};
+
+struct rdma_destroy_cq_resp {
+ __u32 comp_events_reported;
+ __u32 async_events_reported;
+};
+
+struct rdma_kern_global_route {
+ __u8 dgid[16];
+ __u32 flow_label;
+ __u8 sgid_index;
+ __u8 hop_limit;
+ __u8 traffic_class;
+ __u8 reserved;
+};
+
+struct rdma_kern_ah_attr {
+ struct rdma_kern_global_route grh;
+ __u16 dlid;
+ __u8 sl;
+ __u8 src_path_bits;
+ __u8 static_rate;
+ __u8 is_global;
+ __u8 port_num;
+ __u8 reserved;
+};
+
+struct rdma_kern_qp_attr {
+ __u32 qp_attr_mask;
+ __u32 qp_state;
+ __u32 cur_qp_state;
+ __u32 path_mtu;
+ __u32 path_mig_state;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+
+ struct rdma_kern_ah_attr ah_attr;
+ struct rdma_kern_ah_attr alt_ah_attr;
+
+ /* ib_qp_cap */
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 en_sqd_async_notify;
+ __u8 sq_draining;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 reserved[5];
+};
+
+struct rdma_create_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 send_cq_handle;
+ __u32 recv_cq_handle;
+ __u32 srq_handle;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u8 sq_sig_all;
+ __u8 qp_type;
+ __u8 is_srq;
+ __u8 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_qp_resp {
+ __u32 qp_handle;
+ __u32 qpn;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u32 reserved;
+};
+
+struct rdma_qp_dest {
+ __u8 dgid[16];
+ __u32 flow_label;
+ __u16 dlid;
+ __u16 reserved;
+ __u8 sgid_index;
+ __u8 hop_limit;
+ __u8 traffic_class;
+ __u8 sl;
+ __u8 src_path_bits;
+ __u8 static_rate;
+ __u8 is_global;
+ __u8 port_num;
+};
+
+struct rdma_query_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 qp_handle;
+ __u32 attr_mask;
+ __u64 driver_data[0];
+};
+
+struct rdma_query_qp_resp {
+ struct rdma_qp_dest dest;
+ struct rdma_qp_dest alt_dest;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 en_sqd_async_notify;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 sq_sig_all;
+ __u8 reserved[5];
+ __u64 driver_data[0];
+};
+
+struct rdma_modify_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ struct rdma_qp_dest dest;
+ struct rdma_qp_dest alt_dest;
+ __u32 qp_handle;
+ __u32 attr_mask;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 en_sqd_async_notify;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 reserved[2];
+ __u64 driver_data[0];
+};
+
+struct rdma_destroy_qp {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 qp_handle;
+ __u32 reserved;
+};
+
+struct rdma_destroy_qp_resp {
+ __u32 events_reported;
+};
+
+struct rdma_kern_send_wr {
+ __u64 wr_id;
+ __u32 num_sge;
+ __u32 opcode;
+ __u32 send_flags;
+ __u32 imm_data;
+ union {
+ struct {
+ __u64 remote_addr;
+ __u32 rkey;
+ __u32 reserved;
+ } rdma;
+ struct {
+ __u64 remote_addr;
+ __u64 compare_add;
+ __u64 swap;
+ __u32 rkey;
+ __u32 reserved;
+ } atomic;
+ struct {
+ __u32 ah;
+ __u32 remote_qpn;
+ __u32 remote_qkey;
+ __u32 reserved;
+ } ud;
+ } wr;
+};
+
+struct rdma_post_send {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 qp_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct rdma_kern_send_wr send_wr[0];
+};
+
+struct rdma_post_send_resp {
+ __u32 bad_wr;
+};
+
+struct rdma_kern_recv_wr {
+ __u64 wr_id;
+ __u32 num_sge;
+ __u32 reserved;
+};
+
+struct rdma_post_recv {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 qp_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct rdma_kern_recv_wr recv_wr[0];
+};
+
+struct rdma_post_recv_resp {
+ __u32 bad_wr;
+};
+
+struct rdma_post_srq_recv {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 srq_handle;
+ __u32 wr_count;
+ __u32 sge_count;
+ __u32 wqe_size;
+ struct rdma_kern_recv_wr recv_wr[0];
+};
+
+struct rdma_post_srq_recv_resp {
+ __u32 bad_wr;
+};
+
+struct rdma_create_ah {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 reserved;
+ struct rdma_kern_ah_attr attr;
+};
+
+struct rdma_create_ah_resp {
+ __u32 handle;
+};
+
+struct rdma_destroy_ah {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 ah_handle;
+};
+
+struct rdma_attach_mcast {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u8 gid[16];
+ __u32 qp_handle;
+ __u16 mlid;
+ __u16 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_detach_mcast {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u8 gid[16];
+ __u32 qp_handle;
+ __u16 mlid;
+ __u16 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_srq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_srq_resp {
+ __u32 srq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 reserved;
+};
+
+struct rdma_modify_srq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 srq_handle;
+ __u32 attr_mask;
+ __u32 max_wr;
+ __u32 srq_limit;
+ __u64 driver_data[0];
+};
+
+struct rdma_query_srq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 srq_handle;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_query_srq_resp {
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+};
+
+struct rdma_destroy_srq {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u32 srq_handle;
+ __u32 reserved;
+};
+
+struct rdma_destroy_srq_resp {
+ __u32 events_reported;
+};
+
+/*
+ * Compatibility with older ABI versions
+ */
+
+enum {
+ RDMA_USER_VERBS_CMD_QUERY_PARAMS_V2,
+ RDMA_USER_VERBS_CMD_GET_CONTEXT_V2,
+ RDMA_USER_VERBS_CMD_QUERY_DEVICE_V2,
+ RDMA_USER_VERBS_CMD_QUERY_PORT_V2,
+ RDMA_USER_VERBS_CMD_QUERY_GID_V2,
+ RDMA_USER_VERBS_CMD_QUERY_PKEY_V2,
+ RDMA_USER_VERBS_CMD_ALLOC_PD_V2,
+ RDMA_USER_VERBS_CMD_DEALLOC_PD_V2,
+ RDMA_USER_VERBS_CMD_CREATE_AH_V2,
+ RDMA_USER_VERBS_CMD_MODIFY_AH_V2,
+ RDMA_USER_VERBS_CMD_QUERY_AH_V2,
+ RDMA_USER_VERBS_CMD_DESTROY_AH_V2,
+ RDMA_USER_VERBS_CMD_REG_MR_V2,
+ RDMA_USER_VERBS_CMD_REG_SMR_V2,
+ RDMA_USER_VERBS_CMD_REREG_MR_V2,
+ RDMA_USER_VERBS_CMD_QUERY_MR_V2,
+ RDMA_USER_VERBS_CMD_DEREG_MR_V2,
+ RDMA_USER_VERBS_CMD_ALLOC_MW_V2,
+ RDMA_USER_VERBS_CMD_BIND_MW_V2,
+ RDMA_USER_VERBS_CMD_DEALLOC_MW_V2,
+ RDMA_USER_VERBS_CMD_CREATE_CQ_V2,
+ RDMA_USER_VERBS_CMD_RESIZE_CQ_V2,
+ RDMA_USER_VERBS_CMD_DESTROY_CQ_V2,
+ RDMA_USER_VERBS_CMD_POLL_CQ_V2,
+ RDMA_USER_VERBS_CMD_PEEK_CQ_V2,
+ RDMA_USER_VERBS_CMD_REQ_NOTIFY_CQ_V2,
+ RDMA_USER_VERBS_CMD_CREATE_QP_V2,
+ RDMA_USER_VERBS_CMD_QUERY_QP_V2,
+ RDMA_USER_VERBS_CMD_MODIFY_QP_V2,
+ RDMA_USER_VERBS_CMD_DESTROY_QP_V2,
+ RDMA_USER_VERBS_CMD_POST_SEND_V2,
+ RDMA_USER_VERBS_CMD_POST_RECV_V2,
+ RDMA_USER_VERBS_CMD_ATTACH_MCAST_V2,
+ RDMA_USER_VERBS_CMD_DETACH_MCAST_V2,
+ RDMA_USER_VERBS_CMD_CREATE_SRQ_V2,
+ RDMA_USER_VERBS_CMD_MODIFY_SRQ_V2,
+ RDMA_USER_VERBS_CMD_QUERY_SRQ_V2,
+ RDMA_USER_VERBS_CMD_DESTROY_SRQ_V2,
+ RDMA_USER_VERBS_CMD_POST_SRQ_RECV_V2,
+	/*
+	 * Set commands that didn't exist in older ABI versions to -1 so
+	 * that the compile-time trick RDMA_INIT_CMD() uses to compute
+	 * opcodes doesn't break.
+	 */
+ RDMA_USER_VERBS_CMD_CREATE_COMP_CHANNEL_V2 = -1,
+};
+
+struct rdma_destroy_cq_v1 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 cq_handle;
+};
+
+struct rdma_destroy_qp_v1 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 qp_handle;
+};
+
+struct rdma_destroy_srq_v1 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 srq_handle;
+};
+
+struct rdma_get_context_v2 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 cq_fd_tab;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_cq_v2 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u64 response;
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 event_handler;
+ __u64 driver_data[0];
+};
+
+struct rdma_modify_srq_v3 {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u32 srq_handle;
+ __u32 attr_mask;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct rdma_create_qp_resp_v3 {
+ __u32 qp_handle;
+ __u32 qpn;
+};
+
+struct rdma_create_qp_resp_v4 {
+ __u32 qp_handle;
+ __u32 qpn;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+};
+
+struct rdma_create_srq_resp_v5 {
+ __u32 srq_handle;
+};
+
+#endif /* KERN_ABI_H */
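
To make the command-header convention described near the top of
kern-abi.h concrete, here is a hedged sketch of how librdmaverbs might
issue one of the simpler commands (the write()-to-cmd_fd convention is
assumed from libibverbs; error handling is trimmed):

	#include <unistd.h>
	#include <errno.h>
	#include <rdma/kern-abi.h>

	static int example_dealloc_pd(int cmd_fd, __u32 pd_handle)
	{
		struct rdma_dealloc_pd cmd;

		cmd.command   = RDMA_USER_VERBS_CMD_DEALLOC_PD;
		cmd.in_words  = sizeof cmd / 4;	/* lengths are in 32-bit words */
		cmd.out_words = 0;		/* this command has no response */
		cmd.pd_handle = pd_handle;

		return write(cmd_fd, &cmd, sizeof cmd) == sizeof cmd ? 0 : errno;
	}
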
diff -ruNp ORG/librdmaverbs/include/rdma/marshall.h NEW/librdmaverbs/include/rdma/marshall.h
--- ORG/librdmaverbs/include/rdma/marshall.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/marshall.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INFINIBAND_MARSHALL_H
+#define INFINIBAND_MARSHALL_H
+
+#include <rdma/verbs.h>
+#include <rdma/sa.h>
+#include <rdma/kern-abi.h>
+#include <rdma/sa-kern-abi.h>
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+void rdma_copy_qp_attr_from_kern(struct rdma_qp_attr *dst,
+ struct rdma_kern_qp_attr *src);
+
+void rdma_copy_path_rec_from_kern(struct rdma_sa_path_rec *dst,
+ struct rdma_kern_path_rec *src);
+
+void rdma_copy_path_rec_to_kern(struct rdma_kern_path_rec *dst,
+ struct rdma_sa_path_rec *src);
+
+END_C_DECLS
+
+#endif /* INFINIBAND_MARSHALL_H */
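
These copy helpers exist because the kernel ABI structs use fixed-width
__u8/__u16/__u32 fields with explicit padding, while the userspace
structs use natural host types. A hedged usage sketch (the surrounding
event-handling code is assumed, not part of this patch):

	#include <rdma/marshall.h>

	static void example_consume_path(struct rdma_kern_path_rec *kpath)
	{
		struct rdma_sa_path_rec path;

		rdma_copy_path_rec_from_kern(&path, kpath);
		/* path.dlid, path.pkey, ... are now plain host-type fields */
	}
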
diff -ruNp ORG/librdmaverbs/include/rdma/opcode.h NEW/librdmaverbs/include/rdma/opcode.h
--- ORG/librdmaverbs/include/rdma/opcode.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/opcode.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: opcode.h 1989 2005-03-14 20:25:13Z roland $
+ */
+
+#ifndef INFINIBAND_OPCODE_H
+#define INFINIBAND_OPCODE_H
+
+/*
+ * This macro cleans up the definitions of constants for BTH opcodes.
+ * It is used to define constants such as RDMA_OPCODE_UD_SEND_ONLY,
+ * which becomes RDMA_OPCODE_UD + RDMA_OPCODE_SEND_ONLY, and this gives
+ * the correct value.
+ *
+ * In short, user code should use the constants defined using the
+ * macro rather than worrying about adding together other constants.
+ */
+#define RDMA_OPCODE(transport, op) \
+ RDMA_OPCODE_ ## transport ## _ ## op = \
+ RDMA_OPCODE_ ## transport + RDMA_OPCODE_ ## op
+
+enum {
+ /* transport types -- just used to define real constants */
+ RDMA_OPCODE_RC = 0x00,
+ RDMA_OPCODE_UC = 0x20,
+ RDMA_OPCODE_RD = 0x40,
+ RDMA_OPCODE_UD = 0x60,
+
+ /* operations -- just used to define real constants */
+ RDMA_OPCODE_SEND_FIRST = 0x00,
+ RDMA_OPCODE_SEND_MIDDLE = 0x01,
+ RDMA_OPCODE_SEND_LAST = 0x02,
+ RDMA_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
+ RDMA_OPCODE_SEND_ONLY = 0x04,
+ RDMA_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
+ RDMA_OPCODE_RDMA_WRITE_FIRST = 0x06,
+ RDMA_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
+ RDMA_OPCODE_RDMA_WRITE_LAST = 0x08,
+ RDMA_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
+ RDMA_OPCODE_RDMA_WRITE_ONLY = 0x0a,
+ RDMA_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
+ RDMA_OPCODE_RDMA_READ_REQUEST = 0x0c,
+ RDMA_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
+ RDMA_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
+ RDMA_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
+ RDMA_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
+ RDMA_OPCODE_ACKNOWLEDGE = 0x11,
+ RDMA_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
+ RDMA_OPCODE_COMPARE_SWAP = 0x13,
+ RDMA_OPCODE_FETCH_ADD = 0x14,
+
+	/* real constants follow -- see the comment above about the
+	   RDMA_OPCODE() macro for more details */
+
+ /* RC */
+ RDMA_OPCODE(RC, SEND_FIRST),
+ RDMA_OPCODE(RC, SEND_MIDDLE),
+ RDMA_OPCODE(RC, SEND_LAST),
+ RDMA_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(RC, SEND_ONLY),
+ RDMA_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
+ RDMA_OPCODE(RC, RDMA_WRITE_FIRST),
+ RDMA_OPCODE(RC, RDMA_WRITE_MIDDLE),
+ RDMA_OPCODE(RC, RDMA_WRITE_LAST),
+ RDMA_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(RC, RDMA_WRITE_ONLY),
+ RDMA_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+ RDMA_OPCODE(RC, RDMA_READ_REQUEST),
+ RDMA_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
+ RDMA_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
+ RDMA_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
+ RDMA_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
+ RDMA_OPCODE(RC, ACKNOWLEDGE),
+ RDMA_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
+ RDMA_OPCODE(RC, COMPARE_SWAP),
+ RDMA_OPCODE(RC, FETCH_ADD),
+
+ /* UC */
+ RDMA_OPCODE(UC, SEND_FIRST),
+ RDMA_OPCODE(UC, SEND_MIDDLE),
+ RDMA_OPCODE(UC, SEND_LAST),
+ RDMA_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(UC, SEND_ONLY),
+ RDMA_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
+ RDMA_OPCODE(UC, RDMA_WRITE_FIRST),
+ RDMA_OPCODE(UC, RDMA_WRITE_MIDDLE),
+ RDMA_OPCODE(UC, RDMA_WRITE_LAST),
+ RDMA_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(UC, RDMA_WRITE_ONLY),
+ RDMA_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+
+ /* RD */
+ RDMA_OPCODE(RD, SEND_FIRST),
+ RDMA_OPCODE(RD, SEND_MIDDLE),
+ RDMA_OPCODE(RD, SEND_LAST),
+ RDMA_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(RD, SEND_ONLY),
+ RDMA_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
+ RDMA_OPCODE(RD, RDMA_WRITE_FIRST),
+ RDMA_OPCODE(RD, RDMA_WRITE_MIDDLE),
+ RDMA_OPCODE(RD, RDMA_WRITE_LAST),
+ RDMA_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+ RDMA_OPCODE(RD, RDMA_WRITE_ONLY),
+ RDMA_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+ RDMA_OPCODE(RD, RDMA_READ_REQUEST),
+ RDMA_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
+ RDMA_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
+ RDMA_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
+ RDMA_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
+ RDMA_OPCODE(RD, ACKNOWLEDGE),
+ RDMA_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
+ RDMA_OPCODE(RD, COMPARE_SWAP),
+ RDMA_OPCODE(RD, FETCH_ADD),
+
+ /* UD */
+ RDMA_OPCODE(UD, SEND_ONLY),
+ RDMA_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
+};
+
+#endif /* INFINIBAND_OPCODE_H */
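
As a worked example of the macro arithmetic: RDMA_OPCODE(UC, SEND_ONLY)
expands to RDMA_OPCODE_UC_SEND_ONLY = RDMA_OPCODE_UC +
RDMA_OPCODE_SEND_ONLY = 0x20 + 0x04 = 0x24, the BTH opcode of a UC
"send only" packet. A compile-time check (illustrative only):

	#include <rdma/opcode.h>

	/* Fails to compile if the expansion above is wrong. */
	typedef char opcode_check[RDMA_OPCODE_UC_SEND_ONLY == 0x24 ? 1 : -1];
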
diff -ruNp ORG/librdmaverbs/include/rdma/sa-kern-abi.h NEW/librdmaverbs/include/rdma/sa-kern-abi.h
--- ORG/librdmaverbs/include/rdma/sa-kern-abi.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/sa-kern-abi.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INFINIBAND_SA_KERN_ABI_H
+#define INFINIBAND_SA_KERN_ABI_H
+
+#include <linux/types.h>
+
+/*
+ * Obsolete, deprecated names. Will be removed in librdmaverbs 1.1.
+ */
+#define ib_kern_path_rec rdma_kern_path_rec
+
+struct rdma_kern_path_rec {
+ __u8 dgid[16];
+ __u8 sgid[16];
+ __u16 dlid;
+ __u16 slid;
+ __u32 raw_traffic;
+ __u32 flow_label;
+ __u32 reversible;
+ __u32 mtu;
+ __u16 pkey;
+ __u8 hop_limit;
+ __u8 traffic_class;
+ __u8 numb_path;
+ __u8 sl;
+ __u8 mtu_selector;
+ __u8 rate_selector;
+ __u8 rate;
+ __u8 packet_life_time_selector;
+ __u8 packet_life_time;
+ __u8 preference;
+};
+
+#endif /* INFINIBAND_SA_KERN_ABI_H */
diff -ruNp ORG/librdmaverbs/include/rdma/sa.h NEW/librdmaverbs/include/rdma/sa.h
--- ORG/librdmaverbs/include/rdma/sa.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/sa.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: sa.h 2616 2005-06-15 15:22:39Z halr $
+ */
+
+#ifndef INFINIBAND_SA_H
+#define INFINIBAND_SA_H
+
+#include <rdma/verbs.h>
+
+struct rdma_sa_path_rec {
+ /* reserved */
+ /* reserved */
+ union rdma_gid dgid;
+ union rdma_gid sgid;
+ uint16_t dlid;
+ uint16_t slid;
+ int raw_traffic;
+ /* reserved */
+ uint32_t flow_label;
+ uint8_t hop_limit;
+ uint8_t traffic_class;
+ int reversible;
+ uint8_t numb_path;
+ uint16_t pkey;
+ /* reserved */
+ uint8_t sl;
+ uint8_t mtu_selector;
+ uint8_t mtu;
+ uint8_t rate_selector;
+ uint8_t rate;
+ uint8_t packet_life_time_selector;
+ uint8_t packet_life_time;
+ uint8_t preference;
+};
+
+struct rdma_sa_mcmember_rec {
+ union rdma_gid mgid;
+ union rdma_gid port_gid;
+ uint32_t qkey;
+ uint16_t mlid;
+ uint8_t mtu_selector;
+ uint8_t mtu;
+ uint8_t traffic_class;
+ uint16_t pkey;
+ uint8_t rate_selector;
+ uint8_t rate;
+ uint8_t packet_life_time_selector;
+ uint8_t packet_life_time;
+ uint8_t sl;
+ uint32_t flow_label;
+ uint8_t hop_limit;
+ uint8_t scope;
+ uint8_t join_state;
+ int proxy_join;
+};
+
+struct rdma_sa_service_rec {
+ uint64_t id;
+ union rdma_gid gid;
+ uint16_t pkey;
+ /* uint16_t resv; */
+ uint32_t lease;
+ uint8_t key[16];
+ uint8_t name[64];
+ uint8_t data8[16];
+ uint16_t data16[8];
+ uint32_t data32[4];
+ uint64_t data64[2];
+};
+
+#endif /* INFINIBAND_SA_H */
diff -ruNp ORG/librdmaverbs/include/rdma/verbs.h NEW/librdmaverbs/include/rdma/verbs.h
--- ORG/librdmaverbs/include/rdma/verbs.h 1969-12-31 16:00:00.000000000 -0800
+++ NEW/librdmaverbs/include/rdma/verbs.h 2006-07-10 18:07:46.000000000 -0700
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: verbs.h 7631 2006-06-02 19:53:25Z swise $
+ */
+
+#ifndef INFINIBAND_VERBS_H
+#define INFINIBAND_VERBS_H
+
+#include <stdint.h>
+#include <pthread.h>
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+#if __GNUC__ >= 3
+# define __attribute_const __attribute__((const))
+#else
+# define __attribute_const
+#endif
+
+BEGIN_C_DECLS
+
+union rdma_gid {
+ uint8_t raw[16];
+ struct {
+ uint64_t subnet_prefix;
+ uint64_t interface_id;
+ } global;
+};
+
+enum rdma_node_type {
+	RDMA_NODE_UNKNOWN = -1,
+ RDMA_NODE_CA = 1,
+ RDMA_NODE_SWITCH,
+ RDMA_NODE_ROUTER,
+ RDMA_NODE_RNIC
+};
+
+enum rdma_transport_type {
+	RDMA_TRANSPORT_UNKNOWN = 0,
+	RDMA_TRANSPORT_IB = 1,
+	RDMA_TRANSPORT_IWARP = 2
+};
+
+enum rdma_device_cap_flags {
+ RDMA_DEVICE_RESIZE_MAX_WR = 1,
+ RDMA_DEVICE_BAD_PKEY_CNTR = 1 << 1,
+ RDMA_DEVICE_BAD_QKEY_CNTR = 1 << 2,
+ RDMA_DEVICE_RAW_MULTI = 1 << 3,
+ RDMA_DEVICE_AUTO_PATH_MIG = 1 << 4,
+ RDMA_DEVICE_CHANGE_PHY_PORT = 1 << 5,
+ RDMA_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
+ RDMA_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
+ RDMA_DEVICE_SHUTDOWN_PORT = 1 << 8,
+ RDMA_DEVICE_INIT_TYPE = 1 << 9,
+ RDMA_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
+ RDMA_DEVICE_SYS_IMAGE_GUID = 1 << 11,
+ RDMA_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
+ RDMA_DEVICE_SRQ_RESIZE = 1 << 13,
+ RDMA_DEVICE_N_NOTIFY_CQ = 1 << 14
+};
+
+enum rdma_atomic_cap {
+ RDMA_ATOMIC_NONE,
+ RDMA_ATOMIC_HCA,
+ RDMA_ATOMIC_GLOB
+};
+
+struct rdma_device_attr {
+ char fw_ver[64];
+ uint64_t node_guid;
+ uint64_t sys_image_guid;
+ uint64_t max_mr_size;
+ uint64_t page_size_cap;
+ uint32_t vendor_id;
+ uint32_t vendor_part_id;
+ uint32_t hw_ver;
+ int max_qp;
+ int max_qp_wr;
+ int device_cap_flags;
+ int max_sge;
+ int max_sge_rd;
+ int max_cq;
+ int max_cqe;
+ int max_mr;
+ int max_pd;
+ int max_qp_rd_atom;
+ int max_ee_rd_atom;
+ int max_res_rd_atom;
+ int max_qp_init_rd_atom;
+ int max_ee_init_rd_atom;
+ enum rdma_atomic_cap atomic_cap;
+ int max_ee;
+ int max_rdd;
+ int max_mw;
+ int max_raw_ipv6_qp;
+ int max_raw_ethy_qp;
+ int max_mcast_grp;
+ int max_mcast_qp_attach;
+ int max_total_mcast_qp_attach;
+ int max_ah;
+ int max_fmr;
+ int max_map_per_fmr;
+ int max_srq;
+ int max_srq_wr;
+ int max_srq_sge;
+ uint16_t max_pkeys;
+ uint8_t local_ca_ack_delay;
+ uint8_t phys_port_cnt;
+};
+
+enum rdma_mtu {
+ RDMA_MTU_256 = 1,
+ RDMA_MTU_512 = 2,
+ RDMA_MTU_1024 = 3,
+ RDMA_MTU_2048 = 4,
+ RDMA_MTU_4096 = 5
+};
+
+enum rdma_port_state {
+ RDMA_PORT_NOP = 0,
+ RDMA_PORT_DOWN = 1,
+ RDMA_PORT_INIT = 2,
+ RDMA_PORT_ARMED = 3,
+ RDMA_PORT_ACTIVE = 4,
+ RDMA_PORT_ACTIVE_DEFER = 5
+};
+
+struct rdma_port_attr {
+ enum rdma_port_state state;
+ enum rdma_mtu max_mtu;
+ enum rdma_mtu active_mtu;
+ int gid_tbl_len;
+ uint32_t port_cap_flags;
+ uint32_t max_msg_sz;
+ uint32_t bad_pkey_cntr;
+ uint32_t qkey_viol_cntr;
+ uint16_t pkey_tbl_len;
+ uint16_t lid;
+ uint16_t sm_lid;
+ uint8_t lmc;
+ uint8_t max_vl_num;
+ uint8_t sm_sl;
+ uint8_t subnet_timeout;
+ uint8_t init_type_reply;
+ uint8_t active_width;
+ uint8_t active_speed;
+ uint8_t phys_state;
+};
+
+enum rdma_event_type {
+ RDMA_EVENT_CQ_ERR,
+ RDMA_EVENT_QP_FATAL,
+ RDMA_EVENT_QP_REQ_ERR,
+ RDMA_EVENT_QP_ACCESS_ERR,
+ RDMA_EVENT_COMM_EST,
+ RDMA_EVENT_SQ_DRAINED,
+ RDMA_EVENT_PATH_MIG,
+ RDMA_EVENT_PATH_MIG_ERR,
+ RDMA_EVENT_DEVICE_FATAL,
+ RDMA_EVENT_PORT_ACTIVE,
+ RDMA_EVENT_PORT_ERR,
+ RDMA_EVENT_LID_CHANGE,
+ RDMA_EVENT_PKEY_CHANGE,
+ RDMA_EVENT_SM_CHANGE,
+ RDMA_EVENT_SRQ_ERR,
+ RDMA_EVENT_SRQ_LIMIT_REACHED,
+ RDMA_EVENT_QP_LAST_WQE_REACHED,
+ RDMA_EVENT_CLIENT_REREGISTER
+};
+
+struct rdma_async_event {
+ union {
+ struct rdma_cq *cq;
+ struct rdma_qp *qp;
+ struct rdma_srq *srq;
+ int port_num;
+ } element;
+ enum rdma_event_type event_type;
+};
+
+enum rdma_wc_status {
+ RDMA_WC_SUCCESS,
+ RDMA_WC_LOC_LEN_ERR,
+ RDMA_WC_LOC_QP_OP_ERR,
+ RDMA_WC_LOC_EEC_OP_ERR,
+ RDMA_WC_LOC_PROT_ERR,
+ RDMA_WC_WR_FLUSH_ERR,
+ RDMA_WC_MW_BIND_ERR,
+ RDMA_WC_BAD_RESP_ERR,
+ RDMA_WC_LOC_ACCESS_ERR,
+ RDMA_WC_REM_INV_REQ_ERR,
+ RDMA_WC_REM_ACCESS_ERR,
+ RDMA_WC_REM_OP_ERR,
+ RDMA_WC_RETRY_EXC_ERR,
+ RDMA_WC_RNR_RETRY_EXC_ERR,
+ RDMA_WC_LOC_RDD_VIOL_ERR,
+ RDMA_WC_REM_INV_RD_REQ_ERR,
+ RDMA_WC_REM_ABORT_ERR,
+ RDMA_WC_INV_EECN_ERR,
+ RDMA_WC_INV_EEC_STATE_ERR,
+ RDMA_WC_FATAL_ERR,
+ RDMA_WC_RESP_TIMEOUT_ERR,
+ RDMA_WC_GENERAL_ERR
+};
+
+enum rdma_wc_opcode {
+ RDMA_WC_SEND,
+ RDMA_WC_RDMA_WRITE,
+ RDMA_WC_RDMA_READ,
+ RDMA_WC_COMP_SWAP,
+ RDMA_WC_FETCH_ADD,
+ RDMA_WC_BIND_MW,
+/*
+ * Set value of RDMA_WC_RECV so consumers can test if a completion is a
+ * receive by testing (opcode & RDMA_WC_RECV).
+ */
+ RDMA_WC_RECV = 1 << 7,
+ RDMA_WC_RECV_RDMA_WITH_IMM
+};
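+
+/*
+ * For example (illustrative): RDMA_WC_RECV and RDMA_WC_RECV_RDMA_WITH_IMM
+ * both have bit 7 set, so "if (wc.opcode & RDMA_WC_RECV)" is true
+ * exactly for receive completions.
+ */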
+
+enum rdma_wc_flags {
+ RDMA_WC_GRH = 1 << 0,
+ RDMA_WC_WITH_IMM = 1 << 1
+};
+
+struct rdma_wc {
+ uint64_t wr_id;
+ enum rdma_wc_status status;
+ enum rdma_wc_opcode opcode;
+ uint32_t vendor_err;
+ uint32_t byte_len;
+ uint32_t imm_data; /* in network byte order */
+ uint32_t qp_num;
+ uint32_t src_qp;
+ enum rdma_wc_flags wc_flags;
+ uint16_t pkey_index;
+ uint16_t slid;
+ uint8_t sl;
+ uint8_t dlid_path_bits;
+};
+
+enum rdma_access_flags {
+ RDMA_ACCESS_LOCAL_WRITE = 1,
+ RDMA_ACCESS_REMOTE_WRITE = (1<<1),
+ RDMA_ACCESS_REMOTE_READ = (1<<2),
+ RDMA_ACCESS_REMOTE_ATOMIC = (1<<3),
+ RDMA_ACCESS_MW_BIND = (1<<4)
+};
+
+struct rdma_pd {
+ struct rdma_context *context;
+ uint32_t handle;
+};
+
+struct rdma_mr {
+ struct rdma_context *context;
+ struct rdma_pd *pd;
+ uint32_t handle;
+ uint32_t lkey;
+ uint32_t rkey;
+};
+
+struct rdma_global_route {
+ union rdma_gid dgid;
+ uint32_t flow_label;
+ uint8_t sgid_index;
+ uint8_t hop_limit;
+ uint8_t traffic_class;
+};
+
+enum rdma_rate {
+ RDMA_RATE_MAX = 0,
+ RDMA_RATE_2_5_GBPS = 2,
+ RDMA_RATE_5_GBPS = 5,
+ RDMA_RATE_10_GBPS = 3,
+ RDMA_RATE_20_GBPS = 6,
+ RDMA_RATE_30_GBPS = 4,
+ RDMA_RATE_40_GBPS = 7,
+ RDMA_RATE_60_GBPS = 8,
+ RDMA_RATE_80_GBPS = 9,
+ RDMA_RATE_120_GBPS = 10
+};
+
+/**
+ * rdma_rate_to_mult - Convert the IB rate enum to a multiple of the
+ * base rate of 2.5 Gbit/sec. For example, RDMA_RATE_5_GBPS will be
+ * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
+ * @rate: rate to convert.
+ */
+int rdma_rate_to_mult(enum rdma_rate rate) __attribute_const;
+
+/**
+ * mult_to_rdma_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
+ * @mult: multiple to convert.
+ */
+enum rdma_rate mult_to_rdma_rate(int mult) __attribute_const;
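+
+/*
+ * Example (illustrative): rdma_rate_to_mult(RDMA_RATE_10_GBPS) == 4,
+ * since 10 Gbit/sec is 4 * 2.5 Gbit/sec, and mult_to_rdma_rate(4)
+ * returns RDMA_RATE_10_GBPS.
+ */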
+
+struct rdma_ah_attr {
+ struct rdma_global_route grh;
+ uint16_t dlid;
+ uint8_t sl;
+ uint8_t src_path_bits;
+ uint8_t static_rate;
+ uint8_t is_global;
+ uint8_t port_num;
+};
+
+enum rdma_srq_attr_mask {
+ RDMA_SRQ_MAX_WR = 1 << 0,
+ RDMA_SRQ_LIMIT = 1 << 1
+};
+
+struct rdma_srq_attr {
+ uint32_t max_wr;
+ uint32_t max_sge;
+ uint32_t srq_limit;
+};
+
+struct rdma_srq_init_attr {
+ void *srq_context;
+ struct rdma_srq_attr attr;
+};
+
+enum rdma_qp_type {
+ RDMA_QPT_RC = 2,
+ RDMA_QPT_UC,
+ RDMA_QPT_UD
+};
+
+struct rdma_qp_cap {
+ uint32_t max_send_wr;
+ uint32_t max_recv_wr;
+ uint32_t max_send_sge;
+ uint32_t max_recv_sge;
+ uint32_t max_inline_data;
+};
+
+struct rdma_qp_init_attr {
+ void *qp_context;
+ struct rdma_cq *send_cq;
+ struct rdma_cq *recv_cq;
+ struct rdma_srq *srq;
+ struct rdma_qp_cap cap;
+ enum rdma_qp_type qp_type;
+ int sq_sig_all;
+};
+
+enum rdma_qp_attr_mask {
+ RDMA_QP_STATE = 1 << 0,
+ RDMA_QP_CUR_STATE = 1 << 1,
+ RDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
+ RDMA_QP_ACCESS_FLAGS = 1 << 3,
+ RDMA_QP_PKEY_INDEX = 1 << 4,
+ RDMA_QP_PORT = 1 << 5,
+ RDMA_QP_QKEY = 1 << 6,
+ RDMA_QP_AV = 1 << 7,
+ RDMA_QP_PATH_MTU = 1 << 8,
+ RDMA_QP_TIMEOUT = 1 << 9,
+ RDMA_QP_RETRY_CNT = 1 << 10,
+ RDMA_QP_RNR_RETRY = 1 << 11,
+ RDMA_QP_RQ_PSN = 1 << 12,
+ RDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13,
+ RDMA_QP_ALT_PATH = 1 << 14,
+ RDMA_QP_MIN_RNR_TIMER = 1 << 15,
+ RDMA_QP_SQ_PSN = 1 << 16,
+ RDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
+ RDMA_QP_PATH_MIG_STATE = 1 << 18,
+ RDMA_QP_CAP = 1 << 19,
+ RDMA_QP_DEST_QPN = 1 << 20
+};
+
+enum rdma_qp_state {
+ RDMA_QPS_RESET,
+ RDMA_QPS_INIT,
+ RDMA_QPS_RTR,
+ RDMA_QPS_RTS,
+ RDMA_QPS_SQD,
+ RDMA_QPS_SQE,
+ RDMA_QPS_ERR
+};
+
+enum rdma_mig_state {
+ RDMA_MIG_MIGRATED,
+ RDMA_MIG_REARM,
+ RDMA_MIG_ARMED
+};
+
+struct rdma_qp_attr {
+ enum rdma_qp_state qp_state;
+ enum rdma_qp_state cur_qp_state;
+ enum rdma_mtu path_mtu;
+ enum rdma_mig_state path_mig_state;
+ uint32_t qkey;
+ uint32_t rq_psn;
+ uint32_t sq_psn;
+ uint32_t dest_qp_num;
+ int qp_access_flags;
+ struct rdma_qp_cap cap;
+ struct rdma_ah_attr ah_attr;
+ struct rdma_ah_attr alt_ah_attr;
+ uint16_t pkey_index;
+ uint16_t alt_pkey_index;
+ uint8_t en_sqd_async_notify;
+ uint8_t sq_draining;
+ uint8_t max_rd_atomic;
+ uint8_t max_dest_rd_atomic;
+ uint8_t min_rnr_timer;
+ uint8_t port_num;
+ uint8_t timeout;
+ uint8_t retry_cnt;
+ uint8_t rnr_retry;
+ uint8_t alt_port_num;
+ uint8_t alt_timeout;
+};
+
+enum rdma_wr_opcode {
+ RDMA_WR_RDMA_WRITE,
+ RDMA_WR_RDMA_WRITE_WITH_IMM,
+ RDMA_WR_SEND,
+ RDMA_WR_SEND_WITH_IMM,
+ RDMA_WR_RDMA_READ,
+ RDMA_WR_ATOMIC_CMP_AND_SWP,
+ RDMA_WR_ATOMIC_FETCH_AND_ADD
+};
+
+enum rdma_send_flags {
+ RDMA_SEND_FENCE = 1 << 0,
+ RDMA_SEND_SIGNALED = 1 << 1,
+ RDMA_SEND_SOLICITED = 1 << 2,
+ RDMA_SEND_INLINE = 1 << 3
+};
+
+struct rdma_sge {
+ uint64_t addr;
+ uint32_t length;
+ uint32_t lkey;
+};
+
+struct rdma_send_wr {
+ struct rdma_send_wr *next;
+ uint64_t wr_id;
+ struct rdma_sge *sg_list;
+ int num_sge;
+ enum rdma_wr_opcode opcode;
+ enum rdma_send_flags send_flags;
+ uint32_t imm_data; /* in network byte order */
+ union {
+ struct {
+ uint64_t remote_addr;
+ uint32_t rkey;
+ } rdma;
+ struct {
+ uint64_t remote_addr;
+ uint64_t compare_add;
+ uint64_t swap;
+ uint32_t rkey;
+ } atomic;
+ struct {
+ struct rdma_ah *ah;
+ uint32_t remote_qpn;
+ uint32_t remote_qkey;
+ } ud;
+ } wr;
+};
+
+struct rdma_recv_wr {
+ struct rdma_recv_wr *next;
+ uint64_t wr_id;
+ struct rdma_sge *sg_list;
+ int num_sge;
+};
+
+struct rdma_srq {
+ struct rdma_context *context;
+ void *srq_context;
+ struct rdma_pd *pd;
+ uint32_t handle;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t events_completed;
+};
+
+struct rdma_qp {
+ struct rdma_context *context;
+ void *qp_context;
+ struct rdma_pd *pd;
+ struct rdma_cq *send_cq;
+ struct rdma_cq *recv_cq;
+ struct rdma_srq *srq;
+ uint32_t handle;
+ uint32_t qp_num;
+ enum rdma_qp_state state;
+ enum rdma_qp_type qp_type;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t events_completed;
+};
+
+struct rdma_comp_channel {
+ int fd;
+};
+
+struct rdma_cq {
+ struct rdma_context *context;
+ void *cq_context;
+ uint32_t handle;
+ int cqe;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ uint32_t comp_events_completed;
+ uint32_t async_events_completed;
+};
+
+struct rdma_ah {
+ struct rdma_context *context;
+ struct rdma_pd *pd;
+ uint32_t handle;
+};
+
+struct rdma_device;
+struct rdma_context;
+
+struct rdma_device_ops {
+ struct rdma_context * (*alloc_context)(struct rdma_device *device, int cmd_fd);
+ void (*free_context)(struct rdma_context *context);
+};
+
+enum {
+ RDMA_SYSFS_NAME_MAX = 64,
+ RDMA_SYSFS_PATH_MAX = 256
+};
+
+struct rdma_device {
+ struct rdma_driver *driver;
+ enum rdma_node_type node_type;
+ struct rdma_device_ops ops;
+	/* Name of underlying kernel IB device, e.g. "mthca0" */
+	char name[RDMA_SYSFS_NAME_MAX];
+	/* Name of uverbs device, e.g. "uverbs0" */
+ char dev_name[RDMA_SYSFS_NAME_MAX];
+ /* Path to infiniband_verbs class device in sysfs */
+ char dev_path[RDMA_SYSFS_PATH_MAX];
+ /* Path to infiniband class device in sysfs */
+ char ibdev_path[RDMA_SYSFS_PATH_MAX];
+};
+
+struct rdma_context_ops {
+ int (*query_device)(struct rdma_context *context,
+ struct rdma_device_attr *device_attr);
+ int (*query_port)(struct rdma_context *context, uint8_t port_num,
+ struct rdma_port_attr *port_attr);
+ struct rdma_pd * (*alloc_pd)(struct rdma_context *context);
+ int (*dealloc_pd)(struct rdma_pd *pd);
+ struct rdma_mr * (*reg_mr)(struct rdma_pd *pd, void *addr, size_t length,
+ enum rdma_access_flags access);
+ int (*dereg_mr)(struct rdma_mr *mr);
+ struct rdma_cq * (*create_cq)(struct rdma_context *context, int cqe,
+ struct rdma_comp_channel *channel,
+ int comp_vector);
+ int (*poll_cq)(struct rdma_cq *cq, int num_entries, struct rdma_wc *wc);
+ int (*req_notify_cq)(struct rdma_cq *cq, int solicited_only);
+ void (*cq_event)(struct rdma_cq *cq);
+ int (*resize_cq)(struct rdma_cq *cq, int cqe);
+ int (*destroy_cq)(struct rdma_cq *cq);
+ struct rdma_srq * (*create_srq)(struct rdma_pd *pd,
+ struct rdma_srq_init_attr *srq_init_attr);
+ int (*modify_srq)(struct rdma_srq *srq,
+ struct rdma_srq_attr *srq_attr,
+ enum rdma_srq_attr_mask srq_attr_mask);
+ int (*query_srq)(struct rdma_srq *srq,
+ struct rdma_srq_attr *srq_attr);
+ int (*destroy_srq)(struct rdma_srq *srq);
+ int (*post_srq_recv)(struct rdma_srq *srq,
+ struct rdma_recv_wr *recv_wr,
+ struct rdma_recv_wr **bad_recv_wr);
+ struct rdma_qp * (*create_qp)(struct rdma_pd *pd, struct rdma_qp_init_attr *attr);
+ int (*query_qp)(struct rdma_qp *qp, struct rdma_qp_attr *attr,
+ enum rdma_qp_attr_mask attr_mask,
+ struct rdma_qp_init_attr *init_attr);
+ int (*modify_qp)(struct rdma_qp *qp, struct rdma_qp_attr *attr,
+ enum rdma_qp_attr_mask attr_mask);
+ int (*destroy_qp)(struct rdma_qp *qp);
+ int (*post_send)(struct rdma_qp *qp, struct rdma_send_wr *wr,
+ struct rdma_send_wr **bad_wr);
+ int (*post_recv)(struct rdma_qp *qp, struct rdma_recv_wr *wr,
+ struct rdma_recv_wr **bad_wr);
+ struct rdma_ah * (*create_ah)(struct rdma_pd *pd, struct rdma_ah_attr *attr);
+ int (*destroy_ah)(struct rdma_ah *ah);
+ int (*attach_mcast)(struct rdma_qp *qp, union rdma_gid *gid,
+ uint16_t lid);
+ int (*detach_mcast)(struct rdma_qp *qp, union rdma_gid *gid,
+ uint16_t lid);
+};
+
+struct rdma_context {
+ struct rdma_device *device;
+ struct rdma_context_ops ops;
+ int cmd_fd;
+ int async_fd;
+ int num_comp_vectors;
+ void *abi_compat;
+};
+
+/**
+ * rdma_get_device_list - Get list of IB devices currently available
+ * @num_devices: optional. If non-NULL, set to the number of devices
+ * returned in the array.
+ *
+ * Return a NULL-terminated array of IB devices. The array can be
+ * released with rdma_free_device_list().
+ */
+struct rdma_device **rdma_get_device_list(int *num_devices);
+
+/**
+ * rdma_free_device_list - Free list from rdma_get_device_list()
+ *
+ * Free an array of devices returned from rdma_get_device_list(). Once
+ * the array is freed, pointers to devices that were not opened with
+ * rdma_open_device() are no longer valid. Client code must open all
+ * devices it intends to use before calling rdma_free_device_list().
+ */
+void rdma_free_device_list(struct rdma_device **list);
+
+/**
+ * rdma_get_device_name - Return kernel device name
+ */
+const char *rdma_get_device_name(struct rdma_device *device);
+
+/**
+ * rdma_get_device_guid - Return device's node GUID
+ */
+uint64_t rdma_get_device_guid(struct rdma_device *device);
+
+/**
+ * rdma_get_transport_type - Return device's network transport type
+ */
+static inline enum rdma_transport_type
+rdma_get_transport_type(struct rdma_context *context)
+{
+ if (!context->device)
+ return RDMA_TRANSPORT_UNKNOWN;
+
+ switch (context->device->node_type) {
+ case RDMA_NODE_CA:
+ case RDMA_NODE_SWITCH:
+ case RDMA_NODE_ROUTER:
+ return RDMA_TRANSPORT_IB;
+ case RDMA_NODE_RNIC:
+ return RDMA_TRANSPORT_IWARP;
+ default:
+ return RDMA_TRANSPORT_UNKNOWN;
+ }
+}
+
+/**
+ * rdma_get_node_type - Return device's node type
+ */
+static inline enum rdma_node_type
+rdma_get_node_type(struct rdma_context *context)
+{
+ if (!context->device)
+ return RDMA_NODE_UNKNOWN;
+
+ return context->device->node_type;
+}
+
+/**
+ * rdma_open_device - Initialize device for use
+ */
+struct rdma_context *rdma_open_device(struct rdma_device *device);
+
+/**
+ * rdma_close_device - Release device
+ */
+int rdma_close_device(struct rdma_context *context);
+
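+/*
+ * Usage sketch (illustrative only; error handling omitted).  A device
+ * must be opened before the list it came from is freed, since freeing
+ * the list invalidates pointers to unopened devices:
+ *
+ *	int num;
+ *	struct rdma_device **list = rdma_get_device_list(&num);
+ *	struct rdma_context *ctx;
+ *
+ *	ctx = rdma_open_device(list[0]);
+ *	rdma_free_device_list(list);
+ *	...
+ *	rdma_close_device(ctx);
+ */
+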
+/**
+ * rdma_get_async_event - Get next async event
+ * @event: Pointer to use to return async event
+ *
+ * All async events returned by rdma_get_async_event() must eventually
+ * be acknowledged with rdma_ack_async_event().
+ */
+int rdma_get_async_event(struct rdma_context *context,
+ struct rdma_async_event *event);
+
+/**
+ * rdma_ack_async_event - Acknowledge an async event
+ * @event: Event to be acknowledged.
+ *
+ * All async events which are returned by rdma_get_async_event() must
+ * be acknowledged. To avoid races, destroying an object (CQ, SRQ or
+ * QP) will wait for all affiliated events to be acknowledged, so
+ * there should be a one-to-one correspondence between acks and
+ * successful gets.
+ */
+void rdma_ack_async_event(struct rdma_async_event *event);
+
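+/*
+ * Typical event loop (sketch only; handle_event() is a hypothetical
+ * helper, and a zero return from rdma_get_async_event() is assumed to
+ * mean success).  Every successful get is matched by exactly one ack:
+ *
+ *	struct rdma_async_event event;
+ *
+ *	while (!rdma_get_async_event(context, &event)) {
+ *		handle_event(&event);
+ *		rdma_ack_async_event(&event);
+ *	}
+ */
+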
+/**
+ * rdma_query_device - Get device properties
+ */
+int rdma_query_device(struct rdma_context *context,
+ struct rdma_device_attr *device_attr);
+
+/**
+ * rdma_query_port - Get port properties
+ */
+int rdma_query_port(struct rdma_context *context, uint8_t port_num,
+ struct rdma_port_attr *port_attr);
+
+/**
+ * rdma_query_gid - Get a GID table entry
+ */
+int rdma_query_gid(struct rdma_context *context, uint8_t port_num,
+ int index, union rdma_gid *gid);
+
+/**
+ * rdma_query_pkey - Get a P_Key table entry
+ */
+int rdma_query_pkey(struct rdma_context *context, uint8_t port_num,
+ int index, uint16_t *pkey);
+
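+/*
+ * Query sketch (illustrative only; port number 1 and table index 0
+ * are arbitrary choices, and error returns are ignored):
+ *
+ *	struct rdma_device_attr dev_attr;
+ *	struct rdma_port_attr port_attr;
+ *	union rdma_gid gid;
+ *	uint16_t pkey;
+ *
+ *	rdma_query_device(context, &dev_attr);
+ *	rdma_query_port(context, 1, &port_attr);
+ *	rdma_query_gid(context, 1, 0, &gid);
+ *	rdma_query_pkey(context, 1, 0, &pkey);
+ */
+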
+/**
+ * rdma_alloc_pd - Allocate a protection domain
+ */
+struct rdma_pd *rdma_alloc_pd(struct rdma_context *context);
+
+/**
+ * rdma_dealloc_pd - Free a protection domain
+ */
+int rdma_dealloc_pd(struct rdma_pd *pd);
+
+/**
+ * rdma_reg_mr - Register a memory region
+ */
+struct rdma_mr *rdma_reg_mr(struct rdma_pd *pd, void *addr,
+ size_t length, enum rdma_access_flags access);
+
+/**
+ * rdma_dereg_mr - Deregister a memory region
+ */
+int rdma_dereg_mr(struct rdma_mr *mr);
+
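+/*
+ * Registration sketch (illustrative only; error handling omitted, and
+ * buf is a previously allocated buffer).  RDMA_ACCESS_LOCAL_WRITE is
+ * assumed by analogy with other verbs libraries; the actual
+ * rdma_access_flags values are defined elsewhere in this header:
+ *
+ *	struct rdma_pd *pd = rdma_alloc_pd(context);
+ *	struct rdma_mr *mr = rdma_reg_mr(pd, buf, 4096,
+ *					 RDMA_ACCESS_LOCAL_WRITE);
+ *	...
+ *	rdma_dereg_mr(mr);
+ *	rdma_dealloc_pd(pd);
+ */
+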
+/**
+ * rdma_create_comp_channel - Create a completion event channel
+ */
+struct rdma_comp_channel *rdma_create_comp_channel(struct rdma_context *context);
+
+/**
+ * rdma_destroy_comp_channel - Destroy a completion event channel
+ */
+int rdma_destroy_comp_channel(struct rdma_comp_channel *channel);
+
+/**
+ * rdma_create_cq - Create a completion queue
+ * @context: Context CQ will be attached to
+ * @cqe: Minimum number of entries required for CQ
+ * @cq_context: Consumer-supplied context returned for completion events
+ * @channel: Completion channel where completion events will be queued.
+ * May be NULL if completion events will not be used.
+ * @comp_vector: Completion vector used to signal completion events.
+ * Must be >= 0 and < context->num_comp_vectors.
+ */
+struct rdma_cq *rdma_create_cq(struct rdma_context *context, int cqe,
+ void *cq_context,
+ struct rdma_comp_channel *channel,
+ int comp_vector);
+
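+/*
+ * CQ setup sketch (illustrative only).  The channel may be NULL if
+ * completion events will not be used; completion vector 0 is used on
+ * the assumption that num_comp_vectors is at least 1:
+ *
+ *	struct rdma_comp_channel *chan = rdma_create_comp_channel(context);
+ *	struct rdma_cq *cq = rdma_create_cq(context, 128, NULL, chan, 0);
+ */
+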
+/**
+ * rdma_resize_cq - Modifies the capacity of the CQ.
+ * @cq: The CQ to resize.
+ * @cqe: The minimum size of the CQ.
+ *
+ * Users can examine the cq structure to determine the actual CQ size.
+ */
+int rdma_resize_cq(struct rdma_cq *cq, int cqe);
+
+/**
+ * rdma_destroy_cq - Destroy a completion queue
+ */
+int rdma_destroy_cq(struct rdma_cq *cq);
+
+/**
+ * rdma_get_cq_event - Read next CQ event
+ * @channel: Channel to get next event from.
+ * @cq: Used to return pointer to CQ.
+ * @cq_context: Used to return consumer-supplied CQ context.
+ *
+ * All completion events returned by rdma_get_cq_event() must
+ * eventually be acknowledged with rdma_ack_cq_events().
+ */
+int rdma_get_cq_event(struct rdma_comp_channel *channel,
+ struct rdma_cq **cq, void **cq_context);
+
+/**
+ * rdma_ack_cq_events - Acknowledge CQ completion events
+ * @cq: CQ to acknowledge events for
+ * @nevents: Number of events to acknowledge.
+ *
+ * All completion events which are returned by rdma_get_cq_event() must
+ * be acknowledged. To avoid races, rdma_destroy_cq() will wait for
+ * all completion events to be acknowledged, so there should be a
+ * one-to-one correspondence between acks and successful gets. An
+ * application may accumulate multiple completion events and
+ * acknowledge them in a single call to rdma_ack_cq_events() by passing
+ * the number of events to ack in @nevents.
+ */
+void rdma_ack_cq_events(struct rdma_cq *cq, unsigned int nevents);
+
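+/*
+ * Event handling sketch (illustrative only, assuming a single CQ on
+ * the channel and a zero return from rdma_get_cq_event() on success).
+ * Acks may be batched: count the successful gets, then acknowledge
+ * them with one rdma_ack_cq_events() call:
+ *
+ *	struct rdma_cq *ev_cq;
+ *	void *ev_ctx;
+ *	unsigned int n;
+ *
+ *	for (n = 0; n < 100; ++n) {
+ *		if (rdma_get_cq_event(channel, &ev_cq, &ev_ctx))
+ *			break;
+ *		...	(poll and process ev_cq here)
+ *	}
+ *	if (n)
+ *		rdma_ack_cq_events(ev_cq, n);
+ */
+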
+/**
+ * rdma_poll_cq - Poll a CQ for work completions
+ * @cq: the CQ being polled
+ * @num_entries: maximum number of completions to return
+ * @wc: array of at least @num_entries of &struct rdma_wc where completions
+ * will be returned
+ *
+ * Poll a CQ for (possibly multiple) completions. If the return value
+ * is < 0, an error occurred. If the return value is >= 0, it is the
+ * number of completions returned. If the return value is
+ * non-negative and strictly less than num_entries, then the CQ was
+ * emptied.
+ */
+static inline int rdma_poll_cq(struct rdma_cq *cq, int num_entries, struct rdma_wc *wc)
+{
+ return cq->context->ops.poll_cq(cq, num_entries, wc);
+}
+
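+/*
+ * Drain loop sketch (illustrative only; process_completion() is a
+ * hypothetical helper).  A return shorter than the array means the CQ
+ * was emptied; a negative return reports an error and also ends the
+ * loop:
+ *
+ *	struct rdma_wc wc[16];
+ *	int i, n;
+ *
+ *	do {
+ *		n = rdma_poll_cq(cq, 16, wc);
+ *		for (i = 0; i < n; ++i)
+ *			process_completion(&wc[i]);
+ *	} while (n == 16);
+ */
+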
+/**
+ * rdma_req_notify_cq - Request completion notification on a CQ. An
+ * event will be added to the completion channel associated with the
+ * CQ when an entry is added to the CQ.
+ * @cq: The completion queue to request notification for.
+ * @solicited_only: If non-zero, an event will be generated only for
+ * the next solicited CQ entry. If zero, any CQ entry, solicited or
+ * not, will generate an event.
+ */
+static inline int rdma_req_notify_cq(struct rdma_cq *cq, int solicited_only)
+{
+ return cq->context->ops.req_notify_cq(cq, solicited_only);
+}
+
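+/*
+ * Arming sketch (illustrative only).  A commonly used pattern is to
+ * request notification and then poll once more, so that a completion
+ * arriving between the last poll and the request is not missed:
+ *
+ *	rdma_req_notify_cq(cq, 0);
+ *	while (rdma_poll_cq(cq, 16, wc) > 0)
+ *		...;
+ *
+ * and only then block in rdma_get_cq_event().
+ */
+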
+/**
+ * rdma_create_srq - Creates an SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ *
+ * The max_wr and max_sge fields of @srq_init_attr are read to determine
+ * the requested size of the SRQ, and are set to the actual values
+ * allocated on return. If rdma_create_srq() succeeds, then max_wr and
+ * max_sge will always be at least as large as the requested values.
+ */
+struct rdma_srq *rdma_create_srq(struct rdma_pd *pd,
+ struct rdma_srq_init_attr *srq_init_attr);
+
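+/*
+ * SRQ creation sketch (illustrative only).  The attribute field names
+ * below (attr.max_wr, attr.max_sge) are assumed by analogy with
+ * libibverbs; the structure itself is defined earlier in this header:
+ *
+ *	struct rdma_srq_init_attr init;
+ *
+ *	memset(&init, 0, sizeof init);
+ *	init.attr.max_wr  = 128;
+ *	init.attr.max_sge = 1;
+ *	srq = rdma_create_srq(pd, &init);
+ */
+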
+/**
+ * rdma_modify_srq - Modifies the attributes for the specified SRQ.
+ * @srq: The SRQ to modify.
+ * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
+ * the current values of selected SRQ attributes are returned.
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
+ * are being modified.
+ *
+ * The mask may contain RDMA_SRQ_MAX_WR to resize the SRQ and/or
+ * RDMA_SRQ_LIMIT to set the SRQ's limit and request notification when
+ * the number of receives queued drops below the limit.
+ */
+int rdma_modify_srq(struct rdma_srq *srq,
+ struct rdma_srq_attr *srq_attr,
+ enum rdma_srq_attr_mask srq_attr_mask);
+
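+/*
+ * Limit arming sketch (illustrative only; the srq_limit field name is
+ * an assumption).  This requests an SRQ limit event once fewer than 16
+ * receives remain queued:
+ *
+ *	struct rdma_srq_attr attr;
+ *
+ *	memset(&attr, 0, sizeof attr);
+ *	attr.srq_limit = 16;
+ *	rdma_modify_srq(srq, &attr, RDMA_SRQ_LIMIT);
+ */
+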
+/**
+ * rdma_query_srq - Returns the attribute list and current values for the
+ * specified SRQ.
+ * @srq: The SRQ to query.
+ * @srq_attr: The attributes of the specified SRQ.
+ */
+int rdma_query_srq(struct rdma_srq *srq, struct rdma_srq_attr *srq_attr);
+
+/**
+ * rdma_destroy_srq - Destroys the specified SRQ.
+ * @srq: The SRQ to destroy.
+ */
+int rdma_destroy_srq(struct rdma_srq *srq);
+
+/**
+ * rdma_post_srq_recv - Posts a list of work requests to the specified SRQ.
+ * @srq: The SRQ to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ * the work request that failed to be posted on the SRQ.
+ */
+static inline int rdma_post_srq_recv(struct rdma_srq *srq,
+ struct rdma_recv_wr *recv_wr,
+ struct rdma_recv_wr **bad_recv_wr)
+{
+ return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
+}
+
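+/*
+ * Receive posting sketch (illustrative only).  struct rdma_recv_wr is
+ * defined above; the rdma_sge field names (addr, length, lkey) and
+ * mr->lkey are assumed by analogy with libibverbs:
+ *
+ *	struct rdma_sge sge;
+ *	struct rdma_recv_wr wr, *bad_wr;
+ *
+ *	sge.addr   = (uintptr_t) buf;
+ *	sge.length = 4096;
+ *	sge.lkey   = mr->lkey;
+ *
+ *	memset(&wr, 0, sizeof wr);
+ *	wr.wr_id   = 1;
+ *	wr.sg_list = &sge;
+ *	wr.num_sge = 1;
+ *	rdma_post_srq_recv(srq, &wr, &bad_wr);
+ */
+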
+/**
+ * rdmav_create_qp - Create a queue pair.
+ */
+struct rdma_qp *rdmav_create_qp(struct rdma_pd *pd,
+ struct rdma_qp_init_attr *qp_init_attr);
+
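+/*
+ * QP creation sketch (illustrative only).  The rdma_qp_init_attr field
+ * names and the RDMA_QPT_RC constant are assumptions by analogy with
+ * libibverbs:
+ *
+ *	struct rdma_qp_init_attr attr;
+ *
+ *	memset(&attr, 0, sizeof attr);
+ *	attr.send_cq          = cq;
+ *	attr.recv_cq          = cq;
+ *	attr.qp_type          = RDMA_QPT_RC;
+ *	attr.cap.max_send_wr  = 16;
+ *	attr.cap.max_recv_wr  = 16;
+ *	attr.cap.max_send_sge = 1;
+ *	attr.cap.max_recv_sge = 1;
+ *	qp = rdmav_create_qp(pd, &attr);
+ */
+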
+/**
+ * rdma_modify_qp - Modify a queue pair.
+ */
+int rdma_modify_qp(struct rdma_qp *qp, struct rdma_qp_attr *attr,
+ enum rdma_qp_attr_mask attr_mask);
+
+/**
+ * rdma_query_qp - Returns the attribute list and current values for the
+ * specified QP.
+ * @qp: The QP to query.
+ * @attr: The attributes of the specified QP.
+ * @attr_mask: A bit-mask used to select specific attributes to query.
+ * @init_attr: Additional attributes of the selected QP.
+ *
+ * The @attr_mask may be used to limit the query to gathering only the
+ * selected attributes.
+ */
+int rdma_query_qp(struct rdma_qp *qp, struct rdma_qp_attr *attr,
+ enum rdma_qp_attr_mask attr_mask,
+ struct rdma_qp_init_attr *init_attr);
+
+/**
+ * rdmav_destroy_qp - Destroy a queue pair.
+ */
+int rdmav_destroy_qp(struct rdma_qp *qp);
+
+/**
+ * rdma_post_send - Post a list of work requests to a send queue.
+ */
+static inline int rdma_post_send(struct rdma_qp *qp, struct rdma_send_wr *wr,
+ struct rdma_send_wr **bad_wr)
+{
+ return qp->context->ops.post_send(qp, wr, bad_wr);
+}
+
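+/*
+ * Send posting sketch (illustrative only).  The rdma_send_wr field
+ * names and the RDMA_WR_SEND and RDMA_SEND_SIGNALED constants are
+ * assumptions by analogy with libibverbs; the QP must already have
+ * been moved to a send-capable state with rdma_modify_qp():
+ *
+ *	struct rdma_send_wr wr, *bad_wr;
+ *
+ *	memset(&wr, 0, sizeof wr);
+ *	wr.wr_id      = 2;
+ *	wr.sg_list    = &sge;
+ *	wr.num_sge    = 1;
+ *	wr.opcode     = RDMA_WR_SEND;
+ *	wr.send_flags = RDMA_SEND_SIGNALED;
+ *	rdma_post_send(qp, &wr, &bad_wr);
+ */
+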
+/**
+ * rdma_post_recv - Post a list of work requests to a receive queue.
+ */
+static inline int rdma_post_recv(struct rdma_qp *qp, struct rdma_recv_wr *wr,
+ struct rdma_recv_wr **bad_wr)
+{
+ return qp->context->ops.post_recv(qp, wr, bad_wr);
+}
+
+/**
+ * rdma_create_ah - Create an address handle.
+ */
+struct rdma_ah *rdma_create_ah(struct rdma_pd *pd, struct rdma_ah_attr *attr);
+
+/**
+ * rdma_destroy_ah - Destroy an address handle.
+ */
+int rdma_destroy_ah(struct rdma_ah *ah);
+
+/**
+ * rdma_attach_mcast - Attaches the specified QP to a multicast group.
+ * @qp: QP to attach to the multicast group. The QP must be a UD QP.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ *
+ * In order to route multicast packets correctly, subnet
+ * administration must have created the multicast group and configured
+ * the fabric appropriately. The port associated with the specified
+ * QP must also be a member of the multicast group.
+ */
+int rdma_attach_mcast(struct rdma_qp *qp, union rdma_gid *gid, uint16_t lid);
+
+/**
+ * rdma_detach_mcast - Detaches the specified QP from a multicast group.
+ * @qp: QP to detach from the multicast group.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ */
+int rdma_detach_mcast(struct rdma_qp *qp, union rdma_gid *gid, uint16_t lid);
+
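+/*
+ * Multicast sketch (illustrative only).  The group GID and LID would
+ * come from a subnet administration join; the LID is given in host
+ * byte order:
+ *
+ *	union rdma_gid mgid;
+ *	uint16_t mlid;
+ *
+ *	(fill mgid and mlid from the SA join response)
+ *	rdma_attach_mcast(qp, &mgid, mlid);
+ *	...
+ *	rdma_detach_mcast(qp, &mgid, mlid);
+ */
+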
+END_C_DECLS
+
+# undef __attribute_const
+
+#endif /* INFINIBAND_VERBS_H */