[ofw] [RFC] [PATCH] verbs/cq: set completion handlers per CQ
Sean Hefty
sean.hefty at intel.com
Mon Apr 28 12:53:28 PDT 2008
The underlying HCA drivers support completion handlers per CQ. Expose
this capability through the kernel verb channel interface. This allows
the HCA driver to call the user back directly, rather than going
through indirect calls.
This is also needed (but is not sufficient) for multiple filter drivers
to access the HCA verb interface directly.
The patch has a nice side effect of reducing the code base.
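To illustrate the dispatch path this enables, here is a minimal,
self-contained sketch (not part of the patch). The ib_comp_handler
typedef matches the patched ib_verbs.h; the struct layout,
ulp_comp_handler, and driver_cq_completion below are simplified,
hypothetical stand-ins and not the actual driver code.

/* Sketch only: per-CQ completion dispatch, driver calls the consumer
 * back directly with the context registered at CQ creation time. */
#include <stdio.h>

typedef void (*ib_comp_handler)(void *cq_context);

struct ib_cq {
	ib_comp_handler	comp_handler;	/* per-CQ handler */
	void		*cq_context;	/* consumer's context */
};

/* Hypothetical consumer (ULP) completion handler. */
static void ulp_comp_handler(void *cq_context)
{
	printf("completion on CQ, context %p\n", cq_context);
}

/* What the low-level driver does when a CQE arrives
 * (cf. the mthca_cq.c hunk below). */
static void driver_cq_completion(struct ib_cq *cq)
{
	cq->comp_handler(cq->cq_context);
}

int main(void)
{
	int my_ctx = 0;
	struct ib_cq my_cq = { ulp_comp_handler, &my_ctx };

	driver_cq_completion(&my_cq);	/* driver calls the ULP back directly */
	return 0;
}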
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
The code compiles, but is untested. A similar change will need to be made
for connectx. IBAL changes were kept to a minimum, but a future patch
could allow IBAL to pass kernel ULP callbacks directly to the HCA, rather
than using an indirect callback handler.
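For reference, a hedged sketch of the two dispatch options just
described; every name here (al_forwarding_cb, ulp_handler, al_cq,
hca_dispatch) is hypothetical and only models the control flow, it is
not the actual AL or HCA code. With this patch, AL still registers its
own forwarding callback (ci_ca_comp_cb) for each CQ; a follow-on patch
could instead hand the kernel ULP's handler straight to the HCA driver.

/* Sketch only: interposed dispatch kept by this patch vs. the direct
 * dispatch a future patch could enable. */
#include <stdio.h>

typedef void (*comp_cb_t)(void *cq_context);

/* --- ULP side: hypothetical consumer callback --- */
static void ulp_handler(void *cq_context)
{
	printf("ULP sees completion, context %p\n", cq_context);
}

/* --- AL (IBAL) side: stand-in for the AL CQ object --- */
struct al_cq {
	comp_cb_t	ulp_cb;
	void		*ulp_context;
};

/* Kept by this patch: AL registers this per CQ and forwards to the ULP. */
static void al_forwarding_cb(void *cq_context)
{
	struct al_cq *h_cq = cq_context;
	h_cq->ulp_cb(h_cq->ulp_context);
}

/* --- HCA driver side: invokes whatever callback was registered --- */
static void hca_dispatch(comp_cb_t cb, void *ctx)
{
	cb(ctx);
}

int main(void)
{
	int ulp_ctx = 0;
	struct al_cq h_cq = { ulp_handler, &ulp_ctx };

	/* Today (with this patch): one extra hop through AL. */
	hca_dispatch(al_forwarding_cb, &h_cq);

	/* Possible follow-on: AL passes the ULP handler straight through. */
	hca_dispatch(ulp_handler, &ulp_ctx);
	return 0;
}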
Index: core/al/al_verbs.h
===================================================================
--- core/al/al_verbs.h (revision 1006)
+++ core/al/al_verbs.h (working copy)
@@ -72,6 +72,8 @@
h_ca->obj.p_ci_ca->verbs.modify_ca( h_ca->obj.p_ci_ca->h_ci_ca,\
port_num, ca_mod, p_port_attr_mod )
+void ci_ca_comp_cb(void *cq_context);
+
static inline ib_api_status_t
verbs_create_cq(
IN const ib_ca_handle_t h_ca,
@@ -81,7 +83,7 @@
{
return h_ca->obj.p_ci_ca->verbs.create_cq(
(p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca,
- h_cq, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf );
+ h_cq, ci_ca_comp_cb, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf );
}
#define verbs_check_cq(h_cq) ((h_cq)->h_ci_cq)
Index: core/al/kernel/al_ci_ca.c
===================================================================
--- core/al/kernel/al_ci_ca.c (revision 1006)
+++ core/al/kernel/al_ci_ca.c (working copy)
@@ -154,7 +154,7 @@
p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb;
/* Open the CI CA. */
- status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid, ci_ca_comp_cb,
+ status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid,
ci_ca_async_event_cb, p_ci_ca, &p_ci_ca->h_ci_ca );
if( status != IB_SUCCESS )
{
Index: hw/mthca/kernel/hca_data.c
===================================================================
--- hw/mthca/kernel/hca_data.c (revision 1006)
+++ hw/mthca/kernel/hca_data.c (working copy)
@@ -137,7 +137,6 @@
ib_api_status_t
mlnx_hobs_set_cb(
IN mlnx_hob_t *hob_p,
- IN ci_completion_cb_t comp_cb_p,
IN ci_async_event_cb_t async_cb_p,
IN const void* const ib_context)
{
@@ -162,7 +161,6 @@
}
}
- hob_p->comp_cb_p = comp_cb_p;
hob_p->async_cb_p = async_cb_p;
hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array),
ib_context));
@@ -185,7 +183,6 @@
p_async_proc = hob_p->async_proc_mgr_p;
hob_p->async_proc_mgr_p = NULL;
- hob_p->comp_cb_p = NULL;
hob_p->async_cb_p = NULL;
hob_p->ca_context = NULL;
hob_p->cl_device_h = NULL;
@@ -330,21 +327,6 @@
}
}
-void cq_comp_handler(struct ib_cq *cq, void *context)
-{
- mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
- struct mthca_cq *mcq =(struct mthca_cq *)cq;
- HCA_ENTER(HCA_DBG_CQ);
- if (hob_p && hob_p->comp_cb_p) {
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
- (hob_p->comp_cb_p)(mcq->cq_context);
- }
- else {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
- }
- HCA_EXIT(HCA_DBG_CQ);
-}
-
void ca_event_handler(struct ib_event *ev, void *context)
{
mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
@@ -841,7 +823,7 @@
&ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
err = ib_find_cached_gid((struct ib_device *)ib_dev_p,
(union ib_gid *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
- if (err) {
+ if (err) {
HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
gid_index = 0;
Index: hw/mthca/kernel/hca_data.h
===================================================================
--- hw/mthca/kernel/hca_data.h (revision 1006)
+++ hw/mthca/kernel/hca_data.h (working copy)
@@ -171,7 +171,6 @@
typedef struct _ib_ca {
ENUM_MARK mark;
- ci_completion_cb_t comp_cb_p;
ci_async_event_cb_t async_cb_p;
const void *ca_context;
void *cl_device_h;
@@ -275,7 +274,6 @@
ib_api_status_t
mlnx_hobs_set_cb(
IN mlnx_hob_t *hob_p,
- IN ci_completion_cb_t comp_cb_p,
IN ci_async_event_cb_t async_cb_p,
IN const void* const ib_context);
@@ -351,8 +349,6 @@
void unmap_crspace_for_all( struct ib_ucontext *p_context );
-void cq_comp_handler(struct ib_cq *cq, void *context);
-
void ca_event_handler(struct ib_event *ev, void *context);
void srq_event_handler(struct ib_event *ev, void *context);
Index: hw/mthca/kernel/hca_verbs.c
===================================================================
--- hw/mthca/kernel/hca_verbs.c (revision 1006)
+++ hw/mthca/kernel/hca_verbs.c (working copy)
@@ -59,7 +59,6 @@
ib_api_status_t
mlnx_open_ca (
IN const ib_net64_t ca_guid, // IN const char * ca_name,
- IN const ci_completion_cb_t pfn_completion_cb,
IN const ci_async_event_cb_t pfn_async_event_cb,
IN const void*const ca_context,
OUT ib_ca_handle_t *ph_ca)
@@ -92,7 +91,6 @@
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,
("context 0x%p\n", ca_context));
status = mlnx_hobs_set_cb(&p_hca->hob,
- pfn_completion_cb,
pfn_async_event_cb,
ca_context);
if (IB_SUCCESS != status) {
@@ -1363,6 +1361,7 @@
mlnx_create_cq (
IN const ib_ca_handle_t h_ca,
IN const void *cq_context,
+ IN ci_completion_cb_t cq_comp_handler,
IN OUT uint32_t *p_size,
OUT ib_cq_handle_t *ph_cq,
IN OUT ci_umv_buf_t *p_umv_buf )
Index: hw/mthca/kernel/ib_verbs.h
===================================================================
--- hw/mthca/kernel/ib_verbs.h (revision 1006)
+++ hw/mthca/kernel/ib_verbs.h (working copy)
@@ -574,7 +574,7 @@
struct ib_ucontext *ucontext;
};
-typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
+typedef void (*ib_comp_handler)(void *cq_context);
struct ib_cq {
struct ib_device *device;
Index: hw/mthca/kernel/mthca_cq.c
===================================================================
--- hw/mthca/kernel/mthca_cq.c (revision 1006)
+++ hw/mthca/kernel/mthca_cq.c (working copy)
@@ -237,7 +237,7 @@
++cq->arm_sn;
}
- cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ cq->ibcq.comp_handler(cq->cq_context);
}
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
Index: inc/iba/ib_ci.h
===================================================================
--- inc/iba/ib_ci.h (revision 1006)
+++ inc/iba/ib_ci.h (working copy)
@@ -161,11 +161,11 @@
* RETURN VALUE
* None
* NOTES
-* The consumer only gets the cq_context and ca_context. It is the client
+* The consumer only gets the cq_context. It is the client
* responsibility to store the cq_handle in the context after the creation
* time. So it can call ci_poll_cq() after the arrival of the notification.
* SEE ALSO
-* ci_open_ca, ci_create_cq
+* ci_create_cq
******
*/
@@ -204,7 +204,6 @@
typedef ib_api_status_t
(*ci_open_ca) (
IN const ib_net64_t ca_guid,
- IN const ci_completion_cb_t pfn_completion_cb,
IN const ci_async_event_cb_t pfn_async_event_cb,
IN const void* const ca_context,
OUT ib_ca_handle_t *ph_ca );
@@ -222,8 +221,6 @@
* guids in a system, e.g. GetCaGuids(), maintained by the IB
* Access Layer. User mode consumers also have the same mechanism
* to retrieve this information.
-* pfn_completion_cb
-* [in] Completion Handler, one per open instance.
* pfn_async_event_cb
* [in] Asynchronous event handler, one per open instance.
* ca_context
@@ -1217,6 +1214,7 @@
(*ci_create_cq) (
IN const ib_ca_handle_t h_ca,
IN const void *cq_context,
+ IN ci_completion_cb_t completion_cb,
IN OUT uint32_t* const p_size,
OUT ib_cq_handle_t *ph_cq,
IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL );
@@ -1231,6 +1229,8 @@
* [in] A handle to the open HCA
* cq_context
* [in] The context that is passed during the completion callbacks.
+* completion_cb
+* [in] Callback for completion events
* p_size
* [in out] Points to a variable containing the number of CQ entries
* requested by the consumer. On completion points to the size of the