[openib-general] [PATCH]: GSI: synchronize with latest ib_verbs.h file
Hal Rosenstock
halr at voltaire.com
Wed Aug 11 10:14:07 PDT 2004
Synchronize GSI with the latest ib_verbs.h file.  On the GSI side this
amounts to using the renamed IB_ACCESS_LOCAL_WRITE flag (formerly
IB_MR_LOCAL_WRITE) and dropping the explicit wr.next initialization,
since work requests are now chained through a list_head.

Note that this change does not yet include replacing the use of
ib_reg_mr with ib_reg_phys_mr; a rough sketch of what that conversion
might look like is included below, before the diff.
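
A minimal sketch of the eventual ib_reg_phys_mr() conversion, using the
prototype and struct ib_phys_buf from the new header.  This is not part
of the patch: the helper name gsi_reg_recv_buf is made up, and the
assumption that the buffer is physically contiguous (so a single
virt_to_phys() suffices) is only illustrative.

/*
 * Hypothetical sketch only -- not part of this patch.  Registers one
 * physically contiguous buffer with IB_ACCESS_LOCAL_WRITE through
 * ib_reg_phys_mr() instead of ib_reg_mr().  Assumes ib_verbs.h,
 * <linux/err.h> and <asm/io.h> are included.
 */
static struct ib_mr *gsi_reg_recv_buf(struct ib_pd *pd, void *buf,
                                      u64 length, u32 *lkey)
{
        struct ib_phys_buf phys_buf;
        u64 iova = (unsigned long) buf;         /* in/out parameter */
        struct ib_mr *mr;

        phys_buf.addr = virt_to_phys(buf);
        phys_buf.size = length;

        mr = ib_reg_phys_mr(pd, &phys_buf, 1,
                            IB_ACCESS_LOCAL_WRITE, &iova);
        if (IS_ERR(mr))
                return mr;

        *lkey = mr->lkey;       /* lkey/rkey now live in struct ib_mr */
        return mr;
}

With struct ib_mr now carrying lkey and rkey, the extra output
parameters of the old ib_reg_mr() call go away.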
Index: access/gsi_main.c
===================================================================
--- access/gsi_main.c (revision 628)
+++ access/gsi_main.c (working copy)
@@ -471,7 +471,7 @@
dtgrm_priv->v_mem_h = ib_reg_mr(hca->pd,
dtgrm_priv->grh,
MAD_BLOCK_SIZE + IB_GRH_LEN,
- IB_MR_LOCAL_WRITE,
+ IB_ACCESS_LOCAL_WRITE,
&dtgrm_priv->sg.lkey, &rkey);
if (IS_ERR(dtgrm_priv->v_mem_h)) {
printk(KERN_ERR \
@@ -1913,7 +1913,7 @@
dtgrm_priv->v_mem_h = ib_reg_mr(hca->pd,
mad,
MAD_BLOCK_SIZE,
- IB_MR_LOCAL_WRITE,
+ IB_ACCESS_LOCAL_WRITE,
&dtgrm_priv->sg.lkey, &rkey);
if (IS_ERR(dtgrm_priv->v_mem_h)) {
printk(KERN_ERR "Could not get general memory attr.\n");
@@ -1921,7 +1921,6 @@
goto error2;
}
- wr.next = NULL;
wr.wr_id = (unsigned long) dtgrm_priv;
wr.sg_list = &dtgrm_priv->sg;
wr.sg_list->addr = (unsigned long) mad;
@@ -2065,14 +2064,13 @@
dtgrm_priv->v_mem_h = ib_reg_mr(hca->pd,
mad,
MAD_BLOCK_SIZE,
- IB_MR_LOCAL_WRITE,
+ IB_ACCESS_LOCAL_WRITE,
&dtgrm_priv->sg.lkey, &rkey);
if (IS_ERR(dtgrm_priv->v_mem_h)) {
printk(KERN_ERR "Could not get general memory attr.\n");
goto error2;
}
- wr.next = NULL;
wr.wr_id = (unsigned long) dtgrm_priv;
wr.sg_list = &dtgrm_priv->sg;
wr.sg_list->addr = (unsigned long) mad;
Index: include/ib_verbs.h
===================================================================
--- include/ib_verbs.h (revision 627)
+++ include/ib_verbs.h (working copy)
@@ -26,6 +26,12 @@
#if !defined( IB_VERBS_H )
#define IB_VERBS_H
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct ib_mad;
+
enum ib_event_type {
IB_EVENT_CQ_ERR,
IB_EVENT_QP_FATAL,
@@ -44,75 +50,96 @@
};
struct ib_event {
- struct ib_device *device;
- void *context;
+ struct ib_device *device;
+ void *context;
union {
- struct ib_cq *cq;
- struct ib_qp *qp;
- u8 port;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ u8 port;
} element;
- enum ib_event_type event;
+ enum ib_event_type event;
};
-typedef void (*ib_event_handler) (struct ib_event * async_event);
+typedef void (*ib_event_handler)(struct ib_event *async_event);
-typedef void (*ib_comp_handler) (struct ib_cq * cq);
+typedef void (*ib_comp_handler)(struct ib_cq *cq);
struct ib_pd {
- struct ib_device *device;
+ struct ib_device *device;
+ atomic_t usecnt;
};
struct ib_ah {
- struct ib_device *device;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ atomic_t usecnt;
};
struct ib_cq {
- struct ib_device *device;
- ib_comp_handler comp_handler;
- void *cq_context;
- int cqe;
+ struct ib_device *device;
+ ib_comp_handler comp_handler;
+ void *cq_context;
+ int cqe;
+ atomic_t usecnt;
};
struct ib_srq {
- struct ib_device *device;
- void *srq_context;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ void *srq_context;
+ atomic_t usecnt;
};
struct ib_qp {
- struct ib_device *device;
- void *qp_context;
- u32 qp_num;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+ void *qp_context;
+ u32 qp_num;
+ atomic_t usecnt;
};
struct ib_mr {
- struct ib_device *device;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ u32 lkey;
+ u32 rkey;
+ atomic_t usecnt;
};
struct ib_mw {
- struct ib_device *device;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ u32 rkey;
+ atomic_t usecnt;
};
struct ib_fmr {
- struct ib_device *device;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ u32 lkey;
+ u32 rkey;
+ atomic_t usecnt;
};
enum ib_device_cap_flags {
- IB_DEVICE_RESIZE_MAX_WR = 1,
- IB_DEVICE_BAD_PKEY_CNT = (1 << 1),
- IB_DEVICE_BAD_QKEY_CNT = (1 << 2),
- IB_DEVICE_RAW_MULTI = (1 << 3),
- IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
- IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
- IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
- IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
- IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
- IB_DEVICE_INIT_TYPE = (1 << 9),
- IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
- IB_DEVICE_SYS_IMG_GUID = (1 << 11),
- IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
- IB_DEVICE_SRQ_RESIZE = (1 << 13),
- IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
- IB_DEVICE_RQ_SIG_TYPE = (1 << 15)
+ IB_DEVICE_RESIZE_MAX_WR = 1,
+ IB_DEVICE_BAD_PKEY_CNT = (1<<1),
+ IB_DEVICE_BAD_QKEY_CNT = (1<<2),
+ IB_DEVICE_RAW_MULTI = (1<<3),
+ IB_DEVICE_AUTO_PATH_MIG = (1<<4),
+ IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
+ IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
+ IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
+ IB_DEVICE_SHUTDOWN_PORT = (1<<8),
+ IB_DEVICE_INIT_TYPE = (1<<9),
+ IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
+ IB_DEVICE_SYS_IMG_GUID = (1<<11),
+ IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
+ IB_DEVICE_SRQ_RESIZE = (1<<13),
+ IB_DEVICE_N_NOTIFY_CQ = (1<<14),
+ IB_DEVICE_RQ_SIG_TYPE = (1<<15)
};
enum ib_atomic_cap {
@@ -122,157 +149,153 @@
};
struct ib_device_cap {
- u64 fw_ver;
- u64 node_guid;
- u64 sys_image_guid;
- u64 max_mr_size;
- u64 page_size_cap;
- u32 vendor_id;
- u32 vendor_part_id;
- u32 hw_ver;
- int max_qp;
- int max_qp_wr;
- int device_cap_flags;
- int max_sge;
- int max_sge_rd;
- int max_cq;
- int max_cqe;
- int max_mr;
- int max_pd;
- int phys_port_cnt;
- int max_qp_rd_atom;
- int max_ee_rd_atom;
- int max_res_rd_atom;
- int max_qp_init_rd_atom;
- int max_ee_init_rd_atom;
- enum ib_atomic_cap atomic_cap;
- int max_ee;
- int max_rdd;
- int max_mw;
- int max_raw_ipv6_qp;
- int max_raw_ethy_qp;
- int max_mcast_grp;
- int max_mcast_qp_attach;
- int max_total_mcast_qp_attach;
- int max_ah;
- int max_fmr;
- int max_map_per_fmr;
- int max_srq;
- int max_srq_wr;
- int max_srq_sge;
- u16 max_pkeys;
- u8 local_ca_ack_delay;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ int max_qp;
+ int max_qp_wr;
+ int device_cap_flags;
+ int max_sge;
+ int max_sge_rd;
+ int max_cq;
+ int max_cqe;
+ int max_mr;
+ int max_pd;
+ int phys_port_cnt;
+ int max_qp_rd_atom;
+ int max_ee_rd_atom;
+ int max_res_rd_atom;
+ int max_qp_init_rd_atom;
+ int max_ee_init_rd_atom;
+ enum ib_atomic_cap atomic_cap;
+ int max_ee;
+ int max_rdd;
+ int max_mw;
+ int max_raw_ipv6_qp;
+ int max_raw_ethy_qp;
+ int max_mcast_grp;
+ int max_mcast_qp_attach;
+ int max_total_mcast_qp_attach;
+ int max_ah;
+ int max_fmr;
+ int max_map_per_fmr;
+ int max_srq;
+ int max_srq_wr;
+ int max_srq_sge;
+ u16 max_pkeys;
+ u8 local_ca_ack_delay;
};
enum ib_mtu {
- IB_MTU_256 = 1,
- IB_MTU_512 = 2,
+ IB_MTU_256 = 1,
+ IB_MTU_512 = 2,
IB_MTU_1024 = 3,
IB_MTU_2048 = 4,
IB_MTU_4096 = 5
};
enum ib_static_rate {
- IB_STATIC_RATE_FULL = 0,
- IB_STATIC_RATE_12X_TO_4X = 2,
- IB_STATIC_RATE_4X_TO_1X = 3,
- IB_STATIC_RATE_12X_TO_1X = 11
+ IB_STATIC_RATE_FULL = 0,
+ IB_STATIC_RATE_12X_TO_4X = 2,
+ IB_STATIC_RATE_4X_TO_1X = 3,
+ IB_STATIC_RATE_12X_TO_1X = 11
};
enum ib_port_state {
- IB_PORT_NOP = 0,
- IB_PORT_DOWN = 1,
- IB_PORT_INIT = 2,
- IB_PORT_ARMED = 3,
- IB_PORT_ACTIVE = 4,
- IB_PORT_ACTIVE_DEFER = 5
+ IB_PORT_NOP = 0,
+ IB_PORT_DOWN = 1,
+ IB_PORT_INIT = 2,
+ IB_PORT_ARMED = 3,
+ IB_PORT_ACTIVE = 4,
+ IB_PORT_ACTIVE_DEFER = 5
};
enum ib_port_cap_flags {
- IB_PORT_SM = (1 << 1),
- IB_PORT_NOTICE_SUP = (1 << 2),
- IB_PORT_TRAP_SUP = (1 << 3),
- IB_PORT_AUTO_MIGR_SUP = (1 << 5),
- IB_PORT_SL_MAP_SUP = (1 << 6),
- IB_PORT_MKEY_NVRAM = (1 << 7),
- IB_PORT_PKEY_NVRAM = (1 << 8),
- IB_PORT_LED_INFO_SUP = (1 << 9),
- IB_PORT_SM_DISABLED = (1 << 10),
- IB_PORT_SYS_IMAGE_GUID_SUP = (1 << 11),
- IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = (1 << 12),
- IB_PORT_CM_SUP = (1 << 16),
- IB_PORT_SNMP_TUNN_SUP = (1 << 17),
- IB_PORT_REINIT_SUP = (1 << 18),
- IB_PORT_DEVICE_MGMT_SUP = (1 << 19),
- IB_PORT_VENDOR_CLS_SUP = (1 << 20),
- IB_PORT_DR_NOTICE_SUP = (1 << 21),
- IB_PORT_PORT_NOTICE_SUP = (1 << 22),
- IB_PORT_BOOT_MGMT_SUP = (1 << 23)
+ IB_PORT_SM = (1<<1),
+ IB_PORT_NOTICE_SUP = (1<<2),
+ IB_PORT_TRAP_SUP = (1<<3),
+ IB_PORT_AUTO_MIGR_SUP = (1<<5),
+ IB_PORT_SL_MAP_SUP = (1<<6),
+ IB_PORT_MKEY_NVRAM = (1<<7),
+ IB_PORT_PKEY_NVRAM = (1<<8),
+ IB_PORT_LED_INFO_SUP = (1<<9),
+ IB_PORT_SM_DISABLED = (1<<10),
+ IB_PORT_SYS_IMAGE_GUID_SUP = (1<<11),
+ IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = (1<<12),
+ IB_PORT_CM_SUP = (1<<16),
+ IB_PORT_SNMP_TUNN_SUP = (1<<17),
+ IB_PORT_REINIT_SUP = (1<<18),
+ IB_PORT_DEVICE_MGMT_SUP = (1<<19),
+ IB_PORT_VENDOR_CLS_SUP = (1<<20),
+ IB_PORT_DR_NOTICE_SUP = (1<<21),
+ IB_PORT_PORT_NOTICE_SUP = (1<<22),
+ IB_PORT_BOOT_MGMT_SUP = (1<<23)
};
struct ib_port {
- enum ib_port_state state;
- enum ib_mtu max_mtu;
- int port_cap_flags;
- int gid_tbl_len;
- u32 max_msg_sz;
- u32 bad_pkey_cntr;
- u32 qkey_viol_cntr;
- u16 pkey_tbl_len;
- u16 lid;
- u16 sm_lid;
- u8 lmc;
- u8 max_vl_num;
- u8 sm_sl;
- u8 subnet_timeout;
- u8 init_type_reply;
-};
+ enum ib_port_state state;
+ enum ib_mtu max_mtu;
+ enum ib_mtu active_mtu;
+ int port_cap_flags;
+ int gid_tbl_len;
+ u32 max_msg_sz;
+ u32 bad_pkey_cntr;
+ u32 qkey_viol_cntr;
+ u16 pkey_tbl_len;
+ u16 lid;
+ u16 sm_lid;
+ u8 lmc;
+ u8 max_vl_num;
+ u8 sm_sl;
+ u8 subnet_timeout;
+ u8 init_type_reply;
+};
enum ib_device_attr_flags {
- IB_DEVICE_SM = 1,
- IB_DEVICE_SNMP_TUN_SUP = (1 << 1),
- IB_DEVICE_DM_SUP = (1 << 2),
- IB_DEVICE_VENDOR_CLS_SUP = (1 << 3),
- IB_DEVICE_RESET_QKEY_CNTR = (1 << 4)
+ IB_DEVICE_SM = 1,
+ IB_DEVICE_SNMP_TUN_SUP = (1<<1),
+ IB_DEVICE_DM_SUP = (1<<2),
+ IB_DEVICE_VENDOR_CLS_SUP = (1<<3),
+ IB_DEVICE_RESET_QKEY_CNTR = (1<<4)
};
union ib_gid {
- u8 raw[16];
+ u8 raw[16];
struct {
-#if __BIG_ENDIAN
- u64 subnet_prefix;
- u64 interface_id;
-#else
- u64 interface_id;
- u64 subnet_prefix;
-#endif
+ u64 subnet_prefix;
+ u64 interface_id;
} global;
};
struct ib_global_route {
- union ib_gid dgid;
- u32 flow_label;
- u8 sgid_index;
- u8 hop_limit;
- u8 traffic_class;
+ union ib_gid dgid;
+ u32 flow_label;
+ u8 sgid_index;
+ u8 hop_limit;
+ u8 traffic_class;
};
struct ib_ah_attr {
- struct ib_global_route grh;
- u16 dlid;
- u8 sl;
- u8 src_path_bits;
- u8 static_rate;
- u8 grh_flag;
- u8 port;
+ struct ib_global_route grh;
+ u16 dlid;
+ u8 sl;
+ u8 src_path_bits;
+ u8 static_rate;
+ u8 grh_flag;
+ u8 port;
};
struct ib_qp_cap {
- u32 max_send_wr;
- u32 max_recv_wr;
- u32 max_send_sge;
- u32 max_recv_sge;
- u32 max_inline_data;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
};
enum ib_sig_type {
@@ -291,38 +314,46 @@
};
struct ib_qp_init_attr {
- void *qp_context;
- struct ib_cq *send_cq;
- struct ib_cq *recv_cq;
- struct ib_srq *srq;
- struct ib_qp_cap cap;
- enum ib_sig_type sq_sig_type;
- enum ib_sig_type rq_sig_type;
- enum ib_qp_type qp_type;
- u8 port_num; /* special QP types only */
+ void *qp_context;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+ struct ib_srq *srq;
+ struct ib_qp_cap cap;
+ enum ib_sig_type sq_sig_type;
+ enum ib_sig_type rq_sig_type;
+ enum ib_qp_type qp_type;
+ u8 port_num; /* special QP types only */
};
+enum ib_access_flags {
+ IB_ACCESS_REMOTE_WRITE = 1,
+ IB_ACCESS_REMOTE_READ = (1<<1),
+ IB_ACCESS_REMOTE_ATOMIC = (1<<2),
+ IB_ACCESS_LOCAL_WRITE = (1<<3),
+ IB_ACCESS_MW_BIND = (1<<4)
+};
+
enum ib_qp_attr_mask {
- IB_QP_STATE = 1,
- IB_QP_EN_SQD_ASYNC_NOTIFY = (1 << 1),
- IB_QP_REMOTE_ATOMIC_FLAGS = (1 << 3),
- IB_QP_PKEY_INDEX = (1 << 4),
- IB_QP_PORT = (1 << 5),
- IB_QP_QKEY = (1 << 6),
- IB_QP_AV = (1 << 7),
- IB_QP_PATH_MTU = (1 << 8),
- IB_QP_TIMEOUT = (1 << 9),
- IB_QP_RETRY_CNT = (1 << 10),
- IB_QP_RNR_RETRY = (1 << 11),
- IB_QP_RQ_PSN = (1 << 12),
- IB_QP_MAX_QP_RD_ATOMIC = (1 << 13),
- IB_QP_ALT_PATH = (1 << 14),
- IB_QP_MIN_RNR_TIMER = (1 << 15),
- IB_QP_SQ_PSN = (1 << 16),
- IB_QP_MAX_DEST_RD_ATOMIC = (1 << 17),
- IB_QP_PATH_MIG_STATE = (1 << 18),
- IB_QP_CAP = (1 << 19),
- IB_QP_DEST_QPN = (1 << 20)
+ IB_QP_STATE = 1,
+ IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<1),
+ IB_QP_ACCESS_FLAGS = (1<<3),
+ IB_QP_PKEY_INDEX = (1<<4),
+ IB_QP_PORT = (1<<5),
+ IB_QP_QKEY = (1<<6),
+ IB_QP_AV = (1<<7),
+ IB_QP_PATH_MTU = (1<<8),
+ IB_QP_TIMEOUT = (1<<9),
+ IB_QP_RETRY_CNT = (1<<10),
+ IB_QP_RNR_RETRY = (1<<11),
+ IB_QP_RQ_PSN = (1<<12),
+ IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
+ IB_QP_ALT_PATH = (1<<14),
+ IB_QP_MIN_RNR_TIMER = (1<<15),
+ IB_QP_SQ_PSN = (1<<16),
+ IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
+ IB_QP_PATH_MIG_STATE = (1<<18),
+ IB_QP_CAP = (1<<19),
+ IB_QP_DEST_QPN = (1<<20)
};
enum ib_qp_state {
@@ -341,81 +372,111 @@
IB_MIG_ARMED
};
+enum ib_rnr_timeout {
+ IB_RNR_TIMER_655_36 = 0,
+ IB_RNR_TIMER_000_01 = 1,
+ IB_RNR_TIMER_000_02 = 2,
+ IB_RNR_TIMER_000_03 = 3,
+ IB_RNR_TIMER_000_04 = 4,
+ IB_RNR_TIMER_000_06 = 5,
+ IB_RNR_TIMER_000_08 = 6,
+ IB_RNR_TIMER_000_12 = 7,
+ IB_RNR_TIMER_000_16 = 8,
+ IB_RNR_TIMER_000_24 = 9,
+ IB_RNR_TIMER_000_32 = 10,
+ IB_RNR_TIMER_000_48 = 11,
+ IB_RNR_TIMER_000_64 = 12,
+ IB_RNR_TIMER_000_96 = 13,
+ IB_RNR_TIMER_001_28 = 14,
+ IB_RNR_TIMER_001_92 = 15,
+ IB_RNR_TIMER_002_56 = 16,
+ IB_RNR_TIMER_003_84 = 17,
+ IB_RNR_TIMER_005_12 = 18,
+ IB_RNR_TIMER_007_68 = 19,
+ IB_RNR_TIMER_010_24 = 20,
+ IB_RNR_TIMER_015_36 = 21,
+ IB_RNR_TIMER_020_48 = 22,
+ IB_RNR_TIMER_030_72 = 23,
+ IB_RNR_TIMER_040_96 = 24,
+ IB_RNR_TIMER_061_44 = 25,
+ IB_RNR_TIMER_081_92 = 26,
+ IB_RNR_TIMER_122_88 = 27,
+ IB_RNR_TIMER_163_84 = 28,
+ IB_RNR_TIMER_245_76 = 29,
+ IB_RNR_TIMER_327_68 = 30,
+ IB_RNR_TIMER_491_52 = 31
+};
+
struct ib_qp_attr {
- enum ib_qp_state qp_state;
- enum ib_mtu path_mtu;
- enum ib_mig_state path_mig_state;
- u32 qkey;
- u32 rq_psn;
- u32 sq_psn;
- u32 dest_qp_num;
- int remote_atomic_flags;
- struct ib_qp_cap cap;
- struct ib_ah_attr ah_attr;
- struct ib_ah_attr alt_ah_attr;
- u16 pkey_index;
- u16 alt_pkey_index;
- u8 en_sqd_async_notify;
- u8 sq_draining;
- u8 max_rd_atomic;
- u8 max_dest_rd_atomic;
- u8 min_rnr_timer;
- u8 port;
- u8 timeout;
- u8 retry_cnt;
- u8 rnr_retry;
- u8 alt_port;
- u8 alt_timeout;
+ enum ib_qp_state qp_state;
+ enum ib_mtu path_mtu;
+ enum ib_mig_state path_mig_state;
+ u32 qkey;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 dest_qp_num;
+ int qp_access_flags;
+ struct ib_qp_cap cap;
+ struct ib_ah_attr ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ u16 pkey_index;
+ u16 alt_pkey_index;
+ u8 en_sqd_async_notify;
+ u8 sq_draining;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ u8 min_rnr_timer;
+ u8 port;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u8 alt_port;
+ u8 alt_timeout;
};
enum ib_srq_attr_mask {
- IB_SRQ_PD = 1,
- IB_SRQ_MAX_WR = (1 << 1),
- IB_SRQ_MAX_SGE = (1 << 2),
- IB_SRQ_LIMIT = (1 << 3)
+ IB_SRQ_PD = 1,
+ IB_SRQ_MAX_WR = (1<<1),
+ IB_SRQ_MAX_SGE = (1<<2),
+ IB_SRQ_LIMIT = (1<<3)
};
struct ib_srq_attr {
- void *srq_context;
- int max_wr;
- int max_sge;
- int srq_limit;
+ int max_wr;
+ int max_sge;
+ int srq_limit;
};
-enum ib_mr_access_flags {
- IB_MR_LOCAL_WRITE = 1,
- IB_MR_REMOTE_WRITE = (1 << 1),
- IB_MR_REMOTE_READ = (1 << 2),
- IB_MR_REMOTE_ATOMIC = (1 << 3),
- IB_MR_MW_BIND = (1 << 4)
-};
-
struct ib_phys_buf {
- u64 addr;
- u64 size;
+ u64 addr;
+ u64 size;
};
struct ib_mr_attr {
- struct ib_pd *pd;
- u64 device_virt_addr;
- u64 size;
- int mr_access_flags;
- u32 lkey;
- u32 rkey;
+ u64 device_virt_addr;
+ u64 size;
+ int mr_access_flags;
};
enum ib_mr_rereg_flags {
- IB_MR_REREG_TRANS = 1,
- IB_MR_REREG_PD = (1 << 1),
- IB_MR_REREG_ACCESS = (1 << 2)
+ IB_MR_REREG_TRANS = 1,
+ IB_MR_REREG_PD = (1<<1),
+ IB_MR_REREG_ACCESS = (1<<2)
};
-struct ib_mw_bind;
+struct ib_mw_bind {
+ struct ib_mr *mr;
+ u64 wr_id;
+ u64 addr;
+ u32 length;
+ int send_flags;
+ int mw_access_flags;
+};
struct ib_fmr_attr {
- int max_pages;
- int max_maps;
- u8 page_size;
+ int max_pages;
+ int max_maps;
+ u8 page_size;
};
enum ib_wr_opcode {
@@ -429,52 +490,52 @@
};
enum ib_send_flags {
- IB_SEND_FENCE = 1,
- IB_SEND_SIGNALED = (1 << 1),
- IB_SEND_SOLICITED = (1 << 2),
- IB_SEND_INLINE = (1 << 3)
+ IB_SEND_FENCE = 1,
+ IB_SEND_SIGNALED = (1<<1),
+ IB_SEND_SOLICITED = (1<<2),
+ IB_SEND_INLINE = (1<<3)
};
struct ib_sge {
- u64 addr;
- u32 length;
- u32 lkey;
+ u64 addr;
+ u32 length;
+ u32 lkey;
};
struct ib_send_wr {
- struct ib_send_wr *next;
- u64 wr_id;
- struct ib_sge *sg_list;
- int num_sge;
- enum ib_wr_opcode opcode;
- int send_flags;
- u32 imm_data;
+ struct list_head list;
+ u64 wr_id;
+ struct ib_sge *sg_list;
+ int num_sge;
+ enum ib_wr_opcode opcode;
+ int send_flags;
+ u32 imm_data;
union {
struct {
- u64 remote_addr;
- u32 rkey;
+ u64 remote_addr;
+ u32 rkey;
} rdma;
struct {
- u64 remote_addr;
- u64 compare_add;
- u64 swap;
- u32 rkey;
+ u64 remote_addr;
+ u64 compare_add;
+ u64 swap;
+ u32 rkey;
} atomic;
struct {
- struct ib_ah *ah;
- u32 remote_qpn;
- u32 remote_qkey;
- u16 pkey_index;
+ struct ib_ah *ah;
+ u32 remote_qpn;
+ u32 remote_qkey;
+ u16 pkey_index; /* valid for GSI only */
} ud;
} wr;
};
struct ib_recv_wr {
- struct _ib_recv_wr *next;
- u64 wr_id;
- struct ib_sge *sg_list;
- int num_sge;
- int recv_flags;
+ struct list_head list;
+ u64 wr_id;
+ struct ib_sge *sg_list;
+ int num_sge;
+ int recv_flags;
};
enum ib_wc_status {
@@ -497,7 +558,8 @@
IB_WC_REM_ABORT_ERR,
IB_WC_INV_EECN_ERR,
IB_WC_INV_EEC_STATE_ERR,
- IB_WC_GENERAL_ERR
+ IB_WC_GENERAL_ERR,
+ IB_WC_RESP_TIMEOUT_ERR
};
enum ib_wc_opcode {
@@ -511,24 +573,24 @@
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV).
*/
- IB_WC_RECV = (1 << 7),
+ IB_WC_RECV = (1<<7),
IB_WC_RECV_RDMA_WITH_IMM
};
struct ib_wc {
- u64 wr_id;
- enum ib_wc_status status;
- enum ib_wc_opcode opcode;
- u32 vendor_err;
- u32 byte_len;
- u32 imm_data;
- u32 src_qp;
- u16 pkey_index;
- int grh_flag:1;
- int imm_data_valid:1;
- u16 slid;
- u8 sl;
- u8 dlid_path_bits;
+ u64 wr_id;
+ enum ib_wc_status status;
+ enum ib_wc_opcode opcode;
+ u32 vendor_err;
+ u32 byte_len;
+ u32 imm_data;
+ u32 src_qp;
+ u16 pkey_index;
+ int grh_flag:1;
+ int imm_data_valid:1;
+ u16 slid;
+ u8 sl;
+ u8 dlid_path_bits;
};
enum ib_cq_notify {
@@ -536,129 +598,431 @@
IB_CQ_NEXT_COMP
};
-int ib_query_device(struct ib_device *device,
- struct ib_device_cap *device_cap);
+enum ib_process_mad_flags {
+ IB_MAD_IGNORE_MKEY = 1
+};
-int ib_query_port(struct ib_device *device,
- u8 port_num, struct ib_port *port);
+#define IB_DEVICE_NAME_MAX 64
-int ib_query_gid(struct ib_device *device,
- u8 port_num,
- int index, union ib_gid *gid);
+struct ib_device {
+ struct module *owner;
+ struct pci_dev *dma_device;
-int ib_query_pkey(struct ib_device *device,
- u8 port_num,
- u16 index, u16 *pkey);
+ char name[IB_DEVICE_NAME_MAX];
+ char *provider;
+ void *private;
+ struct list_head core_list;
+ void *core;
+ void *mad;
+ u32 flags;
-int ib_modify_device(struct ib_device *device,
- u8 port_num, int device_attr_flags);
+ int (*query_device)(struct ib_device *device,
+ struct ib_device_cap *device_cap);
+ int (*query_port)(struct ib_device *device,
+ u8 port_num, struct ib_port *port);
+ int (*query_gid)(struct ib_device *device,
+ u8 port_num, int index,
+ union ib_gid *gid);
+ int (*query_pkey)(struct ib_device *device,
+ u8 port_num, u16 index, u16 *pkey);
+ int (*modify_device)(struct ib_device *device,
+ u8 port_num, int device_attr_flags);
+ struct ib_pd * (*alloc_pd)(struct ib_device *device);
+ int (*dealloc_pd)(struct ib_pd *pd);
+ struct ib_ah * (*create_ah)(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr);
+ int (*modify_ah)(struct ib_ah *ah,
+ struct ib_ah_attr *ah_attr);
+ int (*query_ah)(struct ib_ah *ah,
+ struct ib_ah_attr *ah_attr);
+ int (*destroy_ah)(struct ib_ah *ah);
+ struct ib_qp * (*create_qp)(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_qp_cap *qp_cap);
+ int (*modify_qp)(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_cap *qp_cap);
+ int (*query_qp)(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+ int (*destroy_qp)(struct ib_qp *qp);
+ struct ib_srq * (*create_srq)(struct ib_pd *pd,
+ void *srq_context,
+ struct ib_srq_attr *srq_attr);
+ int (*query_srq)(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr);
+ int (*modify_srq)(struct ib_srq *srq,
+ struct ib_pd *pd,
+ struct ib_srq_attr *srq_attr,
+ int srq_attr_mask);
+ int (*destroy_srq)(struct ib_srq *srq);
+ int (*post_srq)(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+ struct ib_cq * (*create_cq)(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void *cq_context, int cqe);
+ int (*resize_cq)(struct ib_cq *cq, int cqe);
+ int (*destroy_cq)(struct ib_cq *cq);
+ struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start);
+ int (*query_mr)(struct ib_mr *mr,
+ struct ib_mr_attr *mr_attr);
+ int (*dereg_mr)(struct ib_mr *mr);
+ int (*rereg_phys_mr)(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start);
+ struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
+ int (*bind_mw)(struct ib_qp *qp,
+ struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+ int (*dealloc_mw)(struct ib_mw *mw);
+ struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
+ int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+ int (*map_fmr)(struct ib_fmr *fmr, void *addr, u64 size);
+ int (*map_phys_fmr)(struct ib_fmr *fmr,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf);
+ int (*unmap_fmr)(struct ib_fmr **fmr_array, int fmr_cnt);
+ int (*free_fmr)(struct ib_fmr *fmr);
+ int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid,
+ u16 lid);
+ int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid,
+ u16 lid);
+ int (*post_send)(struct ib_qp *qp,
+ struct ib_send_wr *send_wr,
+ struct ib_send_wr **bad_send_wr);
+ int (*post_recv)(struct ib_qp *qp,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+ int (*poll_cq)(struct ib_cq *cq,
+ int num_entries,
+ struct ib_wc *wc_array);
+ int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
+ int (*req_notify_cq)(struct ib_cq *cq,
+ enum ib_cq_notify cq_notify);
+ int (*req_n_notify_cq)(struct ib_cq *cq, int wc_cnt);
+ int (*process_mad)(struct ib_device *device,
+ int process_mad_flags,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad);
-struct ib_pd *ib_alloc_pd(struct ib_device *device);
+ struct class_device class_dev;
+};
-int ib_dealloc_pd(struct ib_pd *pd);
+static inline int ib_query_device(struct ib_device *device,
+ struct ib_device_cap *device_cap)
+{
+ return device->query_device(device, device_cap);
+}
-struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+static inline int ib_query_port(struct ib_device *device,
+ u8 port_num,
+ struct ib_port *port)
+{
+ return device->query_port(device, port_num, port);
+}
-int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+static inline int ib_query_gid(struct ib_device *device,
+ u8 port_num,
+ int index,
+ union ib_gid *gid)
+{
+ return device->query_gid(device, port_num, index, gid);
+}
-int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+static inline int ib_query_pkey(struct ib_device *device,
+ u8 port_num,
+ u16 index,
+ u16 *pkey)
+{
+ return device->query_pkey(device, port_num, index, pkey);
+}
-int ib_destroy_ah(struct ib_ah *ah);
+static inline int ib_modify_device(struct ib_device *device,
+ u8 port_num,
+ int device_attr_flags)
+{
+ return device->modify_device(device, port_num, device_attr_flags);
+}
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_qp_cap *qp_cap);
+static inline struct ib_pd *ib_alloc_pd(struct ib_device *device)
+{
+ return device->alloc_pd(device);
+}
-int ib_modify_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_cap *qp_cap);
+static inline int ib_dealloc_pd(struct ib_pd *pd)
+{
+ return pd->device->dealloc_pd(pd);
+}
-int ib_query_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+static inline struct ib_ah *ib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ return pd->device->create_ah(pd, ah_attr);
+}
-int ib_destroy_qp(struct ib_qp *qp);
+static inline int ib_modify_ah(struct ib_ah *ah,
+ struct ib_ah_attr *ah_attr)
+{
+ return ah->device->modify_ah(ah, ah_attr);
+}
-struct ib_srq *ib_create_srq(struct ib_pd *pd, struct ib_srq_attr *srq_attr);
+static inline int ib_query_ah(struct ib_ah *ah,
+ struct ib_ah_attr *ah_attr)
+{
+ return ah->device->query_ah(ah, ah_attr);
+}
-int ib_query_srq(struct ib_srq *srq,
- struct ib_pd **pd, struct ib_srq_attr *srq_attr);
+static inline int ib_destroy_ah(struct ib_ah *ah)
+{
+ return ah->device->destroy_ah(ah);
+}
-int ib_modify_srq(struct ib_srq *srq,
- struct ib_pd *pd,
- struct ib_srq_attr *srq_attr, int srq_attr_mask);
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_qp_cap *qp_cap)
+{
+ return pd->device->create_qp(pd, qp_init_attr, qp_cap);
+}
-int ib_destroy_srq(struct ib_srq *srq);
+static inline int ib_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_cap *qp_cap)
+{
+ return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, qp_cap);
+}
-int ib_post_srq(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr);
+static inline int ib_query_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ return qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr);
+}
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void *cq_context, int cqe);
+static inline int ib_destroy_qp(struct ib_qp *qp)
+{
+ return qp->device->destroy_qp(qp);
+}
-int ib_resize_cq(struct ib_cq *cq, int cqe);
+static inline struct ib_srq *ib_create_srq(struct ib_pd *pd,
+ void *srq_context,
+ struct ib_srq_attr *srq_attr)
+{
+ return pd->device->create_srq(pd, srq_context, srq_attr);
+}
-int ib_destroy_cq(struct ib_cq *cq);
+static inline int ib_query_srq(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr)
+{
+ return srq->device->query_srq(srq, srq_attr);
+}
+static inline int ib_modify_srq(struct ib_srq *srq,
+ struct ib_pd *pd,
+ struct ib_srq_attr *srq_attr,
+ int srq_attr_mask)
+{
+ return srq->device->modify_srq(srq, pd, srq_attr, srq_attr_mask);
+}
+
+static inline int ib_destroy_srq(struct ib_srq *srq)
+{
+ return srq->device->destroy_srq(srq);
+}
+
+static inline int ib_post_srq(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return srq->device->post_srq(srq, recv_wr, bad_recv_wr);
+}
+
+static inline struct ib_cq *ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void *cq_context,
+ int cqe)
+{
+ return device->create_cq(device, comp_handler, cq_context, cqe);
+}
+
+static inline int ib_resize_cq(struct ib_cq *cq,
+ int cqe)
+{
+ return cq->device->resize_cq(cq, cqe);
+}
+
+static inline int ib_destroy_cq(struct ib_cq *cq)
+{
+ return cq->device->destroy_cq(cq);
+}
+
/* in functions below iova_start is in/out parameter */
-struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
+static inline struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start)
+{
+ return pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
+ mr_access_flags, iova_start);
+}
-int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
+static inline int ib_query_mr(struct ib_mr *mr,
+ struct ib_mr_attr *mr_attr)
+{
+ return mr->device->query_mr(mr, mr_attr);
+}
-int ib_dereg_mr(struct ib_mr *mr);
+static inline int ib_dereg_mr(struct ib_mr *mr)
+{
+ return mr->device->dereg_mr(mr);
+}
-int ib_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start);
+static inline int ib_rereg_phys_mr(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start)
+{
+ return mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, phys_buf_array,
+ num_phys_buf, mr_access_flags,
+ iova_start);
+}
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd, u32 * rkey);
+static inline struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
+{
+ return pd->device->alloc_mw(pd);
+}
-int ib_query_mw(struct ib_mw *mw, u32 * rkey, struct ib_pd **pd);
+static inline int ib_bind_mw(struct ib_qp *qp,
+ struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind)
+{
+ return mw->device->bind_mw(qp, mw, mw_bind);
+}
-int ib_bind_mw(struct ib_qp *qp,
- struct ib_mw *mw, struct ib_mw_bind *mw_bind);
+static inline int ib_dealloc_mw(struct ib_mw *mw)
+{
+ return mw->device->dealloc_mw(mw);
+}
-int ib_dealloc_mw(struct ib_mw *mw);
+static inline struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
+ int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ return pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
+}
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags, struct ib_fmr_attr *fmr_attr);
+static inline int ib_map_fmr(struct ib_fmr *fmr,
+ void *addr,
+ u64 size)
+{
+ return fmr->device->map_fmr(fmr, addr, size);
+}
-int ib_map_fmr(struct ib_fmr *fmr,
- void *addr, u64 size);
+static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf)
+{
+ return fmr->device->map_phys_fmr(fmr, phys_buf_array, num_phys_buf);
+}
-int ib_map_phys_fmr(struct ib_fmr *fmr,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf, u32 * lkey, u32 * rkey);
+static inline int ib_unmap_fmr(struct ib_fmr **fmr_array,
+ int fmr_cnt)
+{
+ /* Requires all FMRs to come from same device. */
+ return fmr_array[0]->device->unmap_fmr(fmr_array, fmr_cnt);
+}
-int ib_unmap_fmr(struct ib_fmr **fmr_array, int fmr_cnt);
+static inline int ib_free_fmr(struct ib_fmr *fmr)
+{
+ return fmr->device->free_fmr(fmr);
+}
-int ib_free_fmr(struct ib_fmr *fmr);
+static inline int ib_attach_mcast(struct ib_qp *qp,
+ union ib_gid *gid,
+ u16 lid)
+{
+ return qp->device->attach_mcast(qp, gid, lid);
+}
-int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+static inline int ib_detach_mcast(struct ib_qp *qp,
+ union ib_gid *gid,
+ u16 lid)
+{
+ return qp->device->detach_mcast(qp, gid, lid);
+}
-int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+static inline int ib_post_send(struct ib_qp *qp,
+ struct ib_send_wr *send_wr,
+ struct ib_send_wr **bad_send_wr)
+{
+ return qp->device->post_send(qp, send_wr, bad_send_wr);
+}
-int ib_post_send(struct ib_qp *qp,
- struct ib_send_wr *send_wr, struct ib_send_wr **bad_send_wr);
+static inline int ib_post_recv(struct ib_qp *qp,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
+}
-int ib_post_recv(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr);
+/**
+ * ib_poll_cq - poll a CQ for completion(s)
+ * @cq:the CQ being polled
+ * @num_entries:maximum number of completions to return
+ * @wc:array of at least @num_entries &struct ib_wc where completions
+ * will be returned
+ *
+ * Poll a CQ for (possibly multiple) completions. If the return value
+ * is < 0, an error occurred. If the return value is >= 0, it is the
+ * number of completions returned. If the return value is
+ * non-negative and < num_entries, then the CQ was emptied.
+ */
+static inline int ib_poll_cq(struct ib_cq *cq,
+ int num_entries,
+ struct ib_wc *wc_array)
+{
+ return cq->device->poll_cq(cq, num_entries, wc_array);
+}
-int ib_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc_array);
+static inline int ib_peek_cq(struct ib_cq *cq,
+ int wc_cnt)
+{
+ return cq->device->peek_cq(cq, wc_cnt);
+}
-int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
+/**
+ * ib_req_notify_cq - request completion notification
+ * @cq:the CQ to generate an event for
+ * @cq_notify:%IB_CQ_SOLICITED for next solicited event,
+ * %IB_CQ_NEXT_COMP for any completion.
+ */
+static inline int ib_req_notify_cq(struct ib_cq *cq,
+ enum ib_cq_notify cq_notify)
+{
+ return cq->device->req_notify_cq(cq, cq_notify);
+}
-int ib_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
+static inline int ib_req_n_notify_cq(struct ib_cq *cq,
+ int wc_cnt)
+{
+ return cq->device->req_n_notify_cq(cq, wc_cnt);
+}
-int ib_req_n_notify_cq(struct ib_cq *cq, int wc_cnt);
-
-#endif /* IB_VERBS_H */
+#endif /* IB_VERBS_H */
Index: include/ib_core.h
===================================================================
--- include/ib_core.h (revision 562)
+++ include/ib_core.h (working copy)
@@ -24,10 +24,6 @@
#include "ib_core_types.h"
-struct ib_device {
- char name[IB_DEVICE_NAME_MAX];
-};
-
struct ib_device *ib_device_get_by_name(const char *name);
struct ib_device *ib_device_get_by_index(int index);
Index: include/ib_core_types.h
===================================================================
--- include/ib_core_types.h (revision 624)
+++ include/ib_core_types.h (working copy)
@@ -22,8 +22,6 @@
#ifndef _IB_CORE_TYPES_H
#define _IB_CORE_TYPES_H
-#define IB_DEVICE_NAME_MAX 64
-
enum {
IB_DEVICE_NOTIFIER_ADD,
IB_DEVICE_NOTIFIER_REMOVE
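
As a usage note on the new inline CQ wrappers added above
(ib_poll_cq()/ib_req_notify_cq()), a consumer would drain and re-arm a
CQ roughly as follows.  This is only an illustrative sketch and not
part of the patch: the batch size, the function name and the empty
per-completion handling are assumptions.

/* Illustrative only -- not part of this patch. */
#define WC_BATCH 8

static void drain_and_rearm_cq(struct ib_cq *cq)
{
        struct ib_wc wc[WC_BATCH];
        int i, n;

        do {
                /* Per the ib_poll_cq() comment: < 0 is an error,
                 * otherwise the number of completions returned; fewer
                 * than requested means the CQ is now empty. */
                n = ib_poll_cq(cq, WC_BATCH, wc);
                for (i = 0; i < n; i++)
                        ;       /* handle wc[i] (omitted) */
        } while (n == WC_BATCH);

        /* Re-arm so the comp_handler fires on the next completion. */
        if (n >= 0)
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}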