[ofw] [RFC] [PATCH] CI/verbs: set callback handlers per object

Sean Hefty sean.hefty at intel.com
Tue Apr 29 15:24:36 PDT 2008


The underlying HCA drivers support setting completion handlers per CQ and
event handlers per CQ, QP, or SRQ.  Expose this capability through the kernel
verb channel interface.  This allows the HCA driver to call users back
directly, rather than routing every notification through indirect per-CA
handlers.
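
As a sketch of what this looks like to a CI consumer (the my_* handler
names are hypothetical; the signatures follow ci_completion_cb_t and
ci_async_event_cb_t as modified below):

	/* Per-CQ completion handler: receives only that CQ's context. */
	static void my_comp_cb( void *cq_context )
	{
		/* Look up the CQ handle stored in cq_context and poll. */
	}

	/* Per-object (CQ/QP/SRQ) asynchronous event handler. */
	static void my_async_event_cb( ib_event_rec_t *p_event_record )
	{
		/* p_event_record->context identifies the object that
		 * generated the event. */
	}

These are passed to create_cq/create_qp/create_srq, so the HCA driver can
invoke them directly from its event paths.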

The patch has the nice side effect of reducing the code base, and it helps
support multiple filter drivers accessing the HCA verb interface.

Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
Combined with the query interface patch, this allows an HCA to support
multiple simultaneous filter drivers.  Only one of the filter drivers can
call open_ca and receive device-level events, but device-level events are
generic (such as port up/down) and not tied to a specific user's context.
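
For reference, once the completion callback argument is dropped, opening
the CA reduces to something like the following (p_verbs, my_device_event_cb,
and my_context are hypothetical names):

	status = p_verbs->open_ca( ca_guid, my_device_event_cb,
		my_context, &h_ci_ca );

Completion and per-object event handlers are then supplied at
create_cq/create_qp/create_srq time, so additional filter drivers never
need to call open_ca at all.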

A future enhancement to the CI is to allow multiple drivers to register
for device events.  The HCA driver already has the framework to support
this as well, but it is not exposed through the CI.


Index: core/al/al_verbs.h
===================================================================
--- core/al/al_verbs.h	(revision 1006)
+++ core/al/al_verbs.h	(working copy)
@@ -72,6 +72,9 @@
 	h_ca->obj.p_ci_ca->verbs.modify_ca( h_ca->obj.p_ci_ca->h_ci_ca,\
 		port_num, ca_mod, p_port_attr_mod )
 
+void ci_ca_comp_cb(void *cq_context);
+void ci_ca_async_event_cb(ib_event_rec_t* p_event_record);
+
 static inline ib_api_status_t
 verbs_create_cq(
 	IN		const	ib_ca_handle_t				h_ca,
@@ -81,7 +84,8 @@
 {
 	return h_ca->obj.p_ci_ca->verbs.create_cq(
 		(p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca,
-		h_cq, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf );
+		h_cq, ci_ca_async_event_cb, ci_ca_comp_cb, &p_cq_create->size,
+		&h_cq->h_ci_cq, p_umv_buf );
 }
 
 #define verbs_check_cq(h_cq)	((h_cq)->h_ci_cq)
@@ -241,7 +245,7 @@
 	ib_api_status_t		status;
 
 	status = h_srq->obj.p_ci_ca->verbs.create_srq(
-		h_pd->h_ci_pd, h_srq, p_srq_attr,
+		h_pd->h_ci_pd, h_srq, ci_ca_async_event_cb, p_srq_attr,
 		&h_srq->h_ci_srq, p_umv_buf );
 
 	h_srq->h_recv_srq = h_srq->h_ci_srq;
@@ -286,7 +290,7 @@
 	ib_api_status_t		status;
 
 	status = h_qp->obj.p_ci_ca->verbs.create_spl_qp(
-		h_pd->h_ci_pd, port_num, h_qp, p_qp_create,
+		h_pd->h_ci_pd, port_num, h_qp, ci_ca_async_event_cb, p_qp_create,
 		p_qp_attr, &h_qp->h_ci_qp );
 
 	h_qp->h_recv_qp = h_qp->h_ci_qp;
@@ -309,7 +313,7 @@
 	ib_api_status_t		status;
 
 	status = h_qp->obj.p_ci_ca->verbs.create_qp(
-		h_pd->h_ci_pd, h_qp, p_qp_create, p_qp_attr,
+		h_pd->h_ci_pd, h_qp, ci_ca_async_event_cb, p_qp_create, p_qp_attr,
 		&h_qp->h_ci_qp, p_umv_buf );
 
 	h_qp->h_recv_qp = h_qp->h_ci_qp;
Index: core/al/kernel/al_ci_ca.c
===================================================================
--- core/al/kernel/al_ci_ca.c	(revision 1006)
+++ core/al/kernel/al_ci_ca.c	(working copy)
@@ -76,7 +76,7 @@
 
 void
 ci_ca_async_event_cb(
-	IN		const	ib_event_rec_t* const		p_event_record );
+	IN				ib_event_rec_t*				p_event_record );
 
 
 
@@ -154,7 +154,7 @@
 	p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb;
 
 	/* Open the CI CA. */
-	status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid, ci_ca_comp_cb,
+	status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid,
 		ci_ca_async_event_cb, p_ci_ca, &p_ci_ca->h_ci_ca );
 	if( status != IB_SUCCESS )
 	{
@@ -335,7 +335,7 @@
  */
 void
 ci_ca_async_event_cb(
-	IN		const	ib_event_rec_t* const		p_event_record )
+	IN		ib_event_rec_t*		p_event_record )
 {
 	ib_async_event_rec_t	event_rec;
 
Index: hw/mthca/kernel/hca_data.c
===================================================================
--- hw/mthca/kernel/hca_data.c	(revision 1006)
+++ hw/mthca/kernel/hca_data.c	(working copy)
@@ -137,7 +137,6 @@
 ib_api_status_t
 mlnx_hobs_set_cb(
 	IN				mlnx_hob_t					*hob_p, 
-	IN				ci_completion_cb_t			comp_cb_p,
 	IN				ci_async_event_cb_t			async_cb_p,
 	IN		const	void* const					ib_context)
 {
@@ -162,7 +161,6 @@
 		}
 	}
 
-	hob_p->comp_cb_p	= comp_cb_p;
 	hob_p->async_cb_p = async_cb_p;
 	hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
 	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));
@@ -185,7 +183,6 @@
 	p_async_proc = hob_p->async_proc_mgr_p;
 	hob_p->async_proc_mgr_p = NULL;
 
-	hob_p->comp_cb_p = NULL;
 	hob_p->async_cb_p = NULL;
 	hob_p->ca_context = NULL;
 	hob_p->cl_device_h = NULL;
@@ -330,21 +327,6 @@
 	}
 }
 
-void cq_comp_handler(struct ib_cq *cq, void *context)
-{
-	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
-	struct mthca_cq *mcq =(struct mthca_cq *)cq; 
-	HCA_ENTER(HCA_DBG_CQ);
-	if (hob_p && hob_p->comp_cb_p) {
-		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
-		(hob_p->comp_cb_p)(mcq->cq_context);
-	}
-	else {
-		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
-	}
-	HCA_EXIT(HCA_DBG_CQ);
-}
-
 void ca_event_handler(struct ib_event *ev, void *context)
 {
 	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
@@ -352,7 +334,6 @@
 
 	// prepare parameters
 	event_rec.context = (void *)hob_p->ca_context;
-	event_rec.trap.info.port_num = ev->element.port_num;
 	event_rec.type = ev->event;
 	if (event_rec.type > IB_AE_UNKNOWN) {
 		// CL_ASSERT(0); // This shouldn't happen
@@ -369,66 +350,6 @@
 	}
 }
 
-void srq_event_handler(struct ib_event *ev, void *context)
-{
-	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
-	ib_event_rec_t event_rec;
-	struct mthca_srq *srq_p;
-
-	// prepare parameters
-	event_rec.type = ev->event;
-	event_rec.vendor_specific = ev->vendor_specific;
-	srq_p = (struct mthca_srq *)ev->element.srq;
-	event_rec.context = srq_p->srq_context;
-
-	// call the user callback
-	if (hob_p)
-		(hob_p->async_cb_p)(&event_rec);
-	else {
-		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
-	}
-}
-
-
-void qp_event_handler(struct ib_event *ev, void *context)
-{
-	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
-	ib_event_rec_t event_rec;
-	struct mthca_qp *qp_p;
-
-	// prepare parameters
-	event_rec.type = ev->event;
-	event_rec.vendor_specific = ev->vendor_specific;
-	qp_p = (struct mthca_qp *)ev->element.qp;
-	event_rec.context = qp_p->qp_context;
-
-	// call the user callback
-	if (hob_p)
-		(hob_p->async_cb_p)(&event_rec);
-	else {
-		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
-	}
-}
-
-void cq_event_handler(struct ib_event *ev, void *context)
-{
-	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
-	ib_event_rec_t event_rec;
-	struct mthca_cq *cq_p;
-
-	// prepare parameters
-	event_rec.type = ev->event;
-	cq_p = (struct mthca_cq *)ev->element.cq;
-	event_rec.context = cq_p->cq_context;
-
-	// call the user callback
-	if (hob_p)
-		(hob_p->async_cb_p)(&event_rec);
-	else {
-		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
-	}
-}
-
 ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)
 {
 #define MAP_QPS(val1,val2) case val1: ib_qps = val2; break
@@ -841,7 +762,7 @@
 			&ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
 		err = ib_find_cached_gid((struct ib_device *)ib_dev_p, 
 			(union ib_gid	*)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
-		if (err) {
+		if (err) {
 
 			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
 			gid_index = 0;
Index: hw/mthca/kernel/hca_data.h
===================================================================
--- hw/mthca/kernel/hca_data.h	(revision 1006)
+++ hw/mthca/kernel/hca_data.h	(working copy)
@@ -171,7 +171,6 @@
 
 typedef struct _ib_ca {
 	ENUM_MARK           mark;
-	ci_completion_cb_t  comp_cb_p;
 	ci_async_event_cb_t async_cb_p;
 	const void          *ca_context;
 	void                *cl_device_h;
@@ -275,7 +274,6 @@
 ib_api_status_t
 mlnx_hobs_set_cb(
 	IN				mlnx_hob_t					*hob_p, 
-	IN				ci_completion_cb_t			comp_cb_p,
 	IN				ci_async_event_cb_t			async_cb_p,
 	IN		const	void* const					ib_context);
 
@@ -351,16 +349,8 @@
 
 void unmap_crspace_for_all( struct ib_ucontext *p_context );
 
-void cq_comp_handler(struct ib_cq *cq, void *context);
-
 void ca_event_handler(struct ib_event *ev, void *context);
 
-void srq_event_handler(struct ib_event *ev, void *context);
-
-void qp_event_handler(struct ib_event *ev, void *context);
-
-void cq_event_handler(struct ib_event *ev, void *context);
-
 ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps);
 
 enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps);
Index: hw/mthca/kernel/hca_verbs.c
===================================================================
--- hw/mthca/kernel/hca_verbs.c	(revision 1006)
+++ hw/mthca/kernel/hca_verbs.c	(working copy)
@@ -59,7 +59,6 @@
 ib_api_status_t
 mlnx_open_ca (
 	IN		const	ib_net64_t					ca_guid, // IN  const char * ca_name,
-	IN		const	ci_completion_cb_t			pfn_completion_cb,
 	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
 	IN		const	void*const					ca_context,
 		OUT			ib_ca_handle_t				*ph_ca)
@@ -92,7 +91,6 @@
 	HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
 		("context 0x%p\n", ca_context));
 	status = mlnx_hobs_set_cb(&p_hca->hob,
-		pfn_completion_cb,
 		pfn_async_event_cb,
 		ca_context);
 	if (IB_SUCCESS != status) {
@@ -837,6 +835,7 @@
 mlnx_create_srq (
 	IN		const	ib_pd_handle_t			h_pd,
 	IN		const	void						*srq_context,
+	IN				ci_async_event_cb_t		event_handler,
 	IN		const	ib_srq_attr_t * const		p_srq_attr,
 		OUT			ib_srq_handle_t			*ph_srq,
 	IN	OUT			ci_umv_buf_t				*p_umv_buf )
@@ -867,7 +866,7 @@
 
 	// prepare the parameters
 	RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));
-	srq_init_attr.event_handler = srq_event_handler;
+	srq_init_attr.event_handler = event_handler;
 	srq_init_attr.srq_context = hob_p;
 	srq_init_attr.attr = *p_srq_attr;
 
@@ -1004,6 +1003,7 @@
 	IN		const	ib_pd_handle_t				h_pd,
 	IN		const	uint8_t						port_num,
 	IN		const	void						*qp_context,
+	IN				ci_async_event_cb_t			event_handler,
 	IN		const	ib_qp_create_t				*p_create_attr,
 		OUT			ib_qp_attr_t				*p_qp_attr,
 		OUT			ib_qp_handle_t				*ph_qp,
@@ -1035,7 +1035,7 @@
 	// prepare the parameters
 	RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
 	qp_init_attr.qp_type = p_create_attr->qp_type;
-	qp_init_attr.event_handler = qp_event_handler;
+	qp_init_attr.event_handler = event_handler;
 	qp_init_attr.qp_context = hob_p;
 	qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
 	qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
@@ -1097,6 +1097,7 @@
 	IN		const	ib_pd_handle_t				h_pd,
 	IN		const	uint8_t						port_num,
 	IN		const	void						*qp_context,
+	IN				ci_async_event_cb_t			event_handler,
 	IN		const	ib_qp_create_t				*p_create_attr,
 		OUT			ib_qp_attr_t				*p_qp_attr,
 		OUT			ib_qp_handle_t				*ph_qp )
@@ -1107,7 +1108,7 @@
 	HCA_ENTER(HCA_DBG_SHIM);
 
 	status =	_create_qp( h_pd, port_num,
-		qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );
+		qp_context, event_handler, p_create_attr, p_qp_attr, ph_qp, NULL );
 		
 	if (status != IB_SUCCESS)
 	{
@@ -1122,6 +1123,7 @@
 mlnx_create_qp (
 	IN		const	ib_pd_handle_t				h_pd,
 	IN		const	void						*qp_context,
+	IN				ci_async_event_cb_t			event_handler,
 	IN		const	ib_qp_create_t				*p_create_attr,
 		OUT			ib_qp_attr_t				*p_qp_attr,
 		OUT			ib_qp_handle_t				*ph_qp,
@@ -1138,7 +1140,7 @@
 	HCA_ENTER(HCA_DBG_QP);
 
 	status = _create_qp( h_pd, port_num,
-		qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );
+		qp_context, event_handler, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );
 		
 	if (status != IB_SUCCESS)
 	{
@@ -1363,6 +1365,8 @@
 mlnx_create_cq (
 	IN		const	ib_ca_handle_t				h_ca,
 	IN		const	void						*cq_context,
+	IN				ci_async_event_cb_t			event_handler,
+	IN				ci_completion_cb_t			cq_comp_handler,
 	IN	OUT			uint32_t					*p_size,
 		OUT			ib_cq_handle_t				*ph_cq,
 	IN	OUT			ci_umv_buf_t				*p_umv_buf )
@@ -1405,7 +1409,7 @@
 
 	// allocate cq	
 	ib_cq_p = ibv_create_cq(ib_dev, 
-		cq_comp_handler, cq_event_handler,
+		cq_comp_handler, event_handler,
 		hob_p, *p_size, p_context, p_umv_buf );
 	if (IS_ERR(ib_cq_p)) {
 		err = PTR_ERR(ib_cq_p);
Index: hw/mthca/kernel/ib_verbs.h
===================================================================
--- hw/mthca/kernel/ib_verbs.h	(revision 1006)
+++ hw/mthca/kernel/ib_verbs.h	(working copy)
@@ -336,7 +336,7 @@
 };
 
 struct ib_srq_init_attr {
-	void					(*event_handler)(struct ib_event *, void *);
+	void					(*event_handler)(ib_event_rec_t *);
 	void					*srq_context;
 	ib_srq_attr_t			attr;
 };
@@ -355,7 +355,7 @@
 };
 
 struct ib_qp_init_attr {
-	void                  (*event_handler)(struct ib_event *, void *);
+	void                  (*event_handler)(ib_event_rec_t *);
 	void		       *qp_context;
 	struct ib_cq	       *send_cq;
 	struct ib_cq	       *recv_cq;
@@ -574,14 +574,14 @@
 	struct ib_ucontext      *ucontext;
 };
 
-typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
+typedef void (*ib_comp_handler)(void *cq_context);
 
 struct ib_cq {
 	struct ib_device       *device;
 	struct ib_ucontext	*ucontext;
 	struct ib_mr *ib_mr;
 	ib_comp_handler   	comp_handler;
-	void                  (*event_handler)(struct ib_event *, void *);
+	void					(*event_handler)(ib_event_rec_t *);
 	void *            	cq_context;
 	int               	cqe;
 	atomic_t          	usecnt; /* count number of work queues */
@@ -592,7 +592,7 @@
 	struct ib_pd	       *pd;
 	struct ib_ucontext	*ucontext;
 	struct ib_mr *ib_mr;
-	void		      (*event_handler)(struct ib_event *, void *);
+	void					(*event_handler)(ib_event_rec_t *);
 	void		       *srq_context;
 	atomic_t          	usecnt; /* count number of work queues */
 };
@@ -605,7 +605,7 @@
 	struct ib_srq	       *srq;
 	struct ib_ucontext	*ucontext;
 	struct ib_mr *ib_mr;
-	void                  (*event_handler)(struct ib_event *, void *);
+	void					(*event_handler)(ib_event_rec_t *);
 	void		       *qp_context;
 	u32			qp_num;
 	enum ib_qp_type_t		qp_type;
@@ -1083,7 +1083,7 @@
  */
 struct ib_cq *ibv_create_cq(struct ib_device *device,
 			   ib_comp_handler comp_handler,
-			   void (*event_handler)(struct ib_event *, void *),
+			   void (*event_handler)(ib_event_rec_t *),
 			   void *cq_context, int cqe, 
 			   struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
 
Index: hw/mthca/kernel/mt_verbs.c
===================================================================
--- hw/mthca/kernel/mt_verbs.c	(revision 1006)
+++ hw/mthca/kernel/mt_verbs.c	(working copy)
@@ -581,7 +581,7 @@
 
 struct ib_cq *ibv_create_cq(struct ib_device *device,
 			   ib_comp_handler comp_handler,
-			   void (*event_handler)(struct ib_event *, void *),
+			   void (*event_handler)(ib_event_rec_t *),
 			   void *cq_context, int cqe, 
 			   struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
 {
Index: hw/mthca/kernel/mthca_cq.c
===================================================================
--- hw/mthca/kernel/mthca_cq.c	(revision 1006)
+++ hw/mthca/kernel/mthca_cq.c	(working copy)
@@ -237,14 +237,14 @@
 			++cq->arm_sn;
 	}
 
-	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+	cq->ibcq.comp_handler(cq->cq_context);
 }
 
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type)
 {
 	struct mthca_cq *cq;
-	struct ib_event event;
+	ib_event_rec_t event;
 	SPIN_LOCK_PREP(lh);
 
 	spin_lock(&dev->cq_table.lock, &lh);
@@ -260,11 +260,10 @@
 		return;
 	}
 
-	event.device      = &dev->ib_dev;
-	event.event       = event_type;
-	event.element.cq  = &cq->ibcq;
-	if (cq->ibcq.event_handler)
-		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+	event.type = event_type;
+	event.context = cq->ibcq.cq_context;
+	event.vendor_specific = 0;
+	cq->ibcq.event_handler(&event);
 
 	if (atomic_dec_and_test(&cq->refcount))
 		wake_up(&cq->wait);
Index: hw/mthca/kernel/mthca_qp.c
===================================================================
--- hw/mthca/kernel/mthca_qp.c	(revision 1006)
+++ hw/mthca/kernel/mthca_qp.c	(working copy)
@@ -391,7 +391,7 @@
 		    enum ib_event_type event_type, u8 vendor_code)
 {
 	struct mthca_qp *qp;
-	struct ib_event event;
+	ib_event_rec_t event;
 	SPIN_LOCK_PREP(lh);
 
 	spin_lock(&dev->qp_table.lock, &lh);
@@ -405,14 +405,12 @@
 		return;
 	}
 
-	event.device      = &dev->ib_dev;
-	event.event       = event_type;
-	event.element.qp  = &qp->ibqp;
+	event.type = event_type;
+	event.context = qp->ibqp.qp_context;
 	event.vendor_specific = vendor_code;
 	HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_QP,("QP %06x Async event  event_type 0x%x vendor_code 0x%x\n",
 		qpn,event_type,vendor_code));
-	if (qp->ibqp.event_handler)
-		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
+	qp->ibqp.event_handler(&event);
 
 	if (atomic_dec_and_test(&qp->refcount))
 		wake_up(&qp->wait);
Index: hw/mthca/kernel/mthca_srq.c
===================================================================
--- hw/mthca/kernel/mthca_srq.c	(revision 1006)
+++ hw/mthca/kernel/mthca_srq.c	(working copy)
@@ -432,7 +432,7 @@
 		     enum ib_event_type event_type, u8 vendor_code)
 {
 	struct mthca_srq *srq;
-	struct ib_event event;
+	ib_event_rec_t event;
 	SPIN_LOCK_PREP(lh);
 
 	spin_lock(&dev->srq_table.lock, &lh);
@@ -449,15 +449,13 @@
 	if (!srq->ibsrq.event_handler)
 		goto out;
 
-	event.device      = &dev->ib_dev;
-	event.event       = event_type;
-	event.element.srq = &srq->ibsrq;
+	event.type = event_type;
+	event.context = srq->ibsrq.srq_context;
 	event.vendor_specific = vendor_code;
 	HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_SRQ,
 		("SRQ %06x Async event  event_type 0x%x vendor_code 0x%x\n",
 		srqn,event_type,vendor_code));
-	if (srq->ibsrq.event_handler)
-		srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+	srq->ibsrq.event_handler(&event);
 
 out:
 	if (atomic_dec_and_test(&srq->refcount))
Index: inc/iba/ib_ci.h
===================================================================
--- inc/iba/ib_ci.h	(revision 1006)
+++ inc/iba/ib_ci.h	(working copy)
@@ -161,11 +161,11 @@
 * RETURN VALUE
 *	None
 * NOTES
-*	The consumer only gets the cq_context and ca_context. It is the client
+*	The consumer only gets the cq_context. It is the client
 *	responsibility to store the cq_handle in the context after the creation
 *	time. So it can call ci_poll_cq() after the arrival of the notification.
 * SEE ALSO
-*	ci_open_ca, ci_create_cq
+*	ci_create_cq
 ******
 */
 
@@ -181,7 +181,7 @@
 */
 typedef void
 (*ci_async_event_cb_t)(
-	IN	const	ib_event_rec_t* const			p_event_record );
+	IN		ib_event_rec_t*		p_event_record );
 /*
 * PARAMETERS
 *	p_event_record
@@ -204,7 +204,6 @@
 typedef ib_api_status_t
 (*ci_open_ca) (
 	IN		const	ib_net64_t					ca_guid,
-	IN		const	ci_completion_cb_t			pfn_completion_cb,
 	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
 	IN		const	void* const					ca_context,
 		OUT			ib_ca_handle_t				*ph_ca );
@@ -222,8 +221,6 @@
 *		guids in a system, e.g. GetCaGuids(), maintained by the IB
 *		Access Layer. User mode consumers also have the same mechanism
 *		to retrieve this information.
-*	pfn_completion_cb
-*		[in] Completion Handler, one per open instance.
 *	pfn_async_event_cb
 *		[in] Asynchronous event handler, one per open instance.
 *	ca_context
@@ -707,6 +704,7 @@
 (*ci_create_srq) (
 	IN		const	ib_pd_handle_t			h_pd,
 	IN		const	void						*srq_context,
+	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
 	IN		const	ib_srq_attr_t * const		p_srq_attr,
 		OUT			ib_srq_handle_t			*ph_srq,
 	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL );
@@ -720,6 +718,8 @@
 *		[in] Handle to Protection Domain
 *	srq_context
 *		[in] A user specified context passed in a asynchronous error callback.
+*	pfn_async_event_cb
+*		[in] Asynchronous event handler.
 *	p_srq_attr
 *		[in out] Initial attributes with which the srq must be created.
 *	ph_srq
@@ -890,6 +890,7 @@
 (*ci_create_qp) (
 	IN		const	ib_pd_handle_t				h_pd,
 	IN		const	void						*qp_context,
+	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
 	IN		const	ib_qp_create_t				*p_create_attr,
 		OUT			ib_qp_attr_t				*p_qp_attr,
 		OUT			ib_qp_handle_t				*ph_qp,
@@ -905,6 +906,8 @@
 *		[in] Handle to Protection Domain
 *	qp_context
 *		[in] A user specified context passed in a asynchronous error callback.
+*	pfn_async_event_cb
+*		[in] Asynchronous event handler.
 *	p_create_attr
 *		[in] Initial attributes with which the qp must be created.
 *	p_qp_attr
@@ -953,6 +956,7 @@
 	IN		const	ib_pd_handle_t		h_pd,
 	IN		const	uint8_t				port_num,
 	IN		const	void				*qp_context,
+	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
 	IN		const	ib_qp_create_t		*p_create_attr,
 		OUT			ib_qp_attr_t		*p_qp_attr,
 		OUT			ib_qp_handle_t		*ph_qp );
@@ -969,6 +973,8 @@
 *	qp_context
 *		[in] User specified context passed during the async error callback
 *		routine.
+*	pfn_async_event_cb
+*		[in] Asynchronous event handler.
 *	p_create_attr
 *		[in] Initial set of attributes with which the queue pair is to be
 *		created.
@@ -1217,6 +1223,8 @@
 (*ci_create_cq) (
 	IN		const	ib_ca_handle_t				h_ca,
 	IN		const	void						*cq_context,
+	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
+	IN				ci_completion_cb_t			completion_cb,
 	IN	OUT			uint32_t* const				p_size,
 		OUT			ib_cq_handle_t				*ph_cq,
 	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL );
@@ -1231,6 +1239,10 @@
 *		[in] A handle to the open HCA
 *	cq_context
 *		[in] The context that is passed during the completion callbacks.
+*	pfn_async_event_cb
+*		[in] Asynchronous event handler.
+*	completion_cb
+*		[in] Callback for completion events
 *	p_size
 *		[in out] Points to a variable containing the number of CQ entries
 *		requested by the consumer. On completion points to the size of the




