[Openib-windows] MTHCA dependency on IBAL.sys
Fabian Tillier
ftillier at silverstorm.com
Mon Mar 27 12:00:28 PST 2006
Hi again Leonid,
I just checked in a change to the bus driver that exports the
ib_get_err_str function in the CI interface.
Here is a patch to MTHCA to use the exported function rather than
linking in IBAL. I am going to look into doing something similar for
the MT23108 driver, and will send you the patch for that too.
Note that this patch adds the MTHCA driver to the standard build (by
adding an entry in the hw\dirs file).
Let me know if you want anything changed and I will resubmit as needed.
Thanks,
- Fab
Index: hw/dirs
===================================================================
--- hw/dirs (revision 256)
+++ hw/dirs (working copy)
@@ -1,2 +1,3 @@
DIRS=\
- mt23108
+ mt23108 \
+ mthca
Index: hw/mthca/kernel/mt_verbs.c
===================================================================
--- hw/mthca/kernel/mt_verbs.c (revision 256)
+++ hw/mthca/kernel/mt_verbs.c (working copy)
@@ -56,38 +56,46 @@
int err;
ib_api_status_t status;
struct ib_ucontext *context_p = (struct ib_ucontext *)h_um_ca;
+#if DBG
+ struct ib_device *ib_dev = context_p->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
context_p->is_removing = TRUE;
if (atomic_read(&context_p->usecnt)) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("resources are not released (cnt %d)\n", context_p->usecnt));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("resources are not released (cnt %d)\n", context_p->usecnt));
status = IB_RESOURCE_BUSY;
goto err_usage;
}
err = ibv_dealloc_pd( context_p->pd );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_dealloc_pd failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ibv_dealloc_pd failed (%d)\n", err));
status = errno_to_iberr(err);
}
err = mthca_dealloc_ucontext(context_p);
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("mthca_dealloc_ucontext failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("mthca_dealloc_ucontext failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_dealloc_ucontext;
}
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM , ("pcs %p\n", PsGetCurrentProcess()) );
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,
+ ("pcs %p\n", PsGetCurrentProcess()) );
status = IB_SUCCESS;
goto end;
err_dealloc_ucontext:
err_usage:
end:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return;
}
Index: hw/mthca/kernel/hca_memory.c
===================================================================
--- hw/mthca/kernel/hca_memory.c (revision 256)
+++ hw/mthca/kernel/hca_memory.c (working copy)
@@ -33,6 +33,7 @@
#include "hca_driver.h"
#include "hca_utils.h"
+#include "mthca_dev.h"
#if defined(EVENT_TRACING)
#ifdef offsetof
@@ -59,7 +60,10 @@
struct ib_mr *mr_p;
struct mthca_mr *mro_p;
struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
-
+#if DBG
+ struct ib_device *ib_dev = ib_pd_p->device;
+#endif
+
HCA_ENTER(HCA_DBG_MEMORY);
// sanity checks
@@ -68,7 +72,8 @@
goto err_unsupported;
}
if (!p_mr_create || 0 == p_mr_create->length) {
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid attributes"));
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY,
+ ("invalid attributes"));
status = IB_INVALID_PARAMETER;
goto err_invalid_parm;
}
@@ -78,7 +83,8 @@
*/
if (p_mr_create->access_ctrl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC) &&
!(p_mr_create->access_ctrl & IB_AC_LOCAL_WRITE)) {
- HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid access rights"));
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY,
+ ("invalid access rights"));
status = IB_INVALID_PERMISSION;
goto err_invalid_access;
}
@@ -93,7 +99,8 @@
(int)p_mr_create->access_ctrl,
&iobuf );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("iobuf_register failed(%d)",err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY,
+ ("iobuf_register failed(%d)",err));
status = errno_to_iberr(err);
goto err_lock;
}
@@ -142,7 +149,8 @@
err_invalid_access:
err_invalid_parm:
err_unsupported:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -161,7 +169,10 @@
struct ib_mr *mr_p;
struct ib_phys_buf *buffer_list;
struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
-
+#if DBG
+ struct ib_device *ib_dev = ib_pd_p->device;
+#endif
+
UNUSED_PARAM( um_call );
HCA_ENTER(HCA_DBG_SHIM);
@@ -183,8 +194,11 @@
//NB: p_pmr_create->hca_page_size is not used, i.e. supposed it is always the same
// register pmr
- if (p_pmr_create->length == (uint64_t)-1LL)
- mr_p = ibv_get_dma_mr(ib_pd_p, map_qp_ibal_acl(p_pmr_create->access_ctrl) );
+ if (p_pmr_create->length == (uint64_t)-1LL)
+ {
+ mr_p = ibv_get_dma_mr( ib_pd_p,
+ map_qp_ibal_acl(p_pmr_create->access_ctrl) );
+ }
else
mr_p = ibv_reg_phys_mr(ib_pd_p, buffer_list, p_pmr_create->num_ranges,
map_qp_ibal_acl(p_pmr_create->access_ctrl), p_vaddr );
@@ -206,7 +220,8 @@
err_reg_phys_mr:
err_invalid_parm:
err_unsupported:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -297,6 +312,9 @@
{
ib_api_status_t status;
int err;
+#if DBG
+ struct ib_device *ib_dev = ((struct ib_mr *)h_mr)->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
@@ -328,7 +346,8 @@
err_dereg_mr:
err_unsupported:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
Index: hw/mthca/kernel/hca_verbs.c
===================================================================
--- hw/mthca/kernel/hca_verbs.c (revision 256)
+++ hw/mthca/kernel/hca_verbs.c (working copy)
@@ -70,20 +70,26 @@
#ifndef WIN_TO_BE_CHANGED
mlnx_hca_t *p_hca;
- ib_api_status_t status;
+ ib_api_status_t status = IB_NOT_FOUND;
mlnx_cache_t *p_cache;
+ struct ib_device *ib_dev;
HCA_ENTER(HCA_DBG_SHIM);
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("context 0x%p\n", ca_context));
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,
+ ("context 0x%p\n", ca_context));
// find CA object
p_hca = mlnx_hca_from_guid( ca_guid );
if( !p_hca ) {
- status = IB_NOT_FOUND;
- goto err_hca_from_guid;
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status IB_NOT_FOUND\n"));
+ return IB_NOT_FOUND;
}
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("context 0x%p\n", ca_context));
+ ib_dev = &p_hca->mdev->ib_dev;
+
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,
+ ("context 0x%p\n", ca_context));
status = mlnx_hobs_set_cb(&p_hca->hob,
pfn_completion_cb,
pfn_async_event_cb,
@@ -109,8 +115,8 @@
err_mad_cache:
err_set_cb:
-err_hca_from_guid:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -385,7 +391,7 @@
err_byte_count:
err_user_unsupported:
if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
HCA_EXIT(HCA_DBG_SHIM);
return status;
@@ -596,7 +602,7 @@
status = IB_SUCCESS;
err_modify_port:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -754,7 +760,8 @@
context_p = mthca_alloc_ucontext(ib_dev, p_umv_buf);
if (IS_ERR(context_p)) {
err = PTR_ERR(context_p);
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("mthca_alloc_ucontext failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("mthca_alloc_ucontext failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_alloc_ucontext;
}
@@ -767,7 +774,8 @@
context_p->pd = ibv_alloc_pd(ib_dev, context_p, &umv_buf);
if (IS_ERR(context_p->pd)) {
err = PTR_ERR(context_p->pd);
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("ibv_alloc_pd failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("ibv_alloc_pd failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_alloc_pd;
}
@@ -793,7 +801,8 @@
end:
if (p_umv_buf && p_umv_buf->command)
p_umv_buf->status = status;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -978,7 +987,8 @@
ib_pd_p = ibv_alloc_pd(ib_dev, context_p, p_umv_buf);
if (IS_ERR(ib_pd_p)) {
err = PTR_ERR(ib_pd_p);
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("ibv_alloc_pd failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("ibv_alloc_pd failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_alloc_pd;
}
@@ -991,7 +1001,8 @@
err_alloc_pd:
if (p_umv_buf && p_umv_buf->command)
p_umv_buf->status = status;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1144,6 +1155,9 @@
ib_api_status_t status;
int err;
struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
+#if DBG
+ struct ib_device *ib_dev = ib_pd_p->device;
+#endif
HCA_ENTER( HCA_DBG_QP);
@@ -1153,14 +1167,16 @@
// dealloc pd
err = ibv_dealloc_pd( ib_pd_p );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_dealloc_pd failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ibv_dealloc_pd failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_dealloc_pd;
}
status = IB_SUCCESS;
err_dealloc_pd:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1259,7 +1275,7 @@
int err = 0;
ib_api_status_t status = IB_SUCCESS;
struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
- struct ib_device *ib_dev_p = ib_pd_p->device;
+ struct ib_device *ib_dev = ib_pd_p->device;
struct ib_ah *ib_av_p;
struct ib_ah_attr ah_attr;
struct ib_ucontext *context_p = NULL;
@@ -1281,12 +1297,13 @@
// fill parameters
RtlZeroMemory(&ah_attr, sizeof(ah_attr));
- mlnx_conv_ibal_av( ib_dev_p, p_addr_vector, &ah_attr );
+ mlnx_conv_ibal_av( ib_dev, p_addr_vector, &ah_attr );
ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, context_p, p_umv_buf);
if (IS_ERR(ib_pd_p)) {
err = PTR_ERR(ib_pd_p);
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_alloc_pd failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("ibv_alloc_pd failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_alloc_av;
}
@@ -1305,7 +1322,8 @@
err_alloc_av:
err_inval_params:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1394,12 +1412,16 @@
int err;
ib_api_status_t status = IB_SUCCESS;
struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
+#if DBG
+ struct ib_device *ib_dev = ib_ah_p->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
// sanity checks
if( p_umv_buf && p_umv_buf->command ) {
- HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("User mode is not supported yet\n"));
+ HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
+ ("User mode is not supported yet\n"));
status = IB_UNSUPPORTED;
goto err_user_unsupported;
}
@@ -1409,7 +1431,8 @@
//TODO: not implemented in low-level driver
err = ibv_query_ah(ib_ah_p, &ah_attr)
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ibv_query_ah failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
+ ("ibv_query_ah failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_query_ah;
}
@@ -1419,7 +1442,8 @@
err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
if (err) {
- HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("mlnx_conv_mthca_av failed (%d)\n", err));
+ HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
+ ("mlnx_conv_mthca_av failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_conv_mthca_av;
}
@@ -1430,7 +1454,8 @@
err_conv_mthca_av:
err_user_unsupported:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1498,26 +1523,28 @@
struct ib_ah_attr ah_attr;
ib_api_status_t status = IB_SUCCESS;
struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
- struct ib_device *ib_dev_p = ib_ah_p->pd->device;
+ struct ib_device *ib_dev = ib_ah_p->pd->device;
HCA_ENTER(HCA_DBG_SHIM);
// sanity checks
if( p_umv_buf && p_umv_buf->command ) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("User mode is not supported yet\n"));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("User mode is not supported yet\n"));
status = IB_UNSUPPORTED;
goto err_user_unsupported;
}
// fill parameters
- mlnx_conv_ibal_av( ib_dev_p, p_addr_vector, &ah_attr );
+ mlnx_conv_ibal_av( ib_dev, p_addr_vector, &ah_attr );
// modify AH
#if 0
//TODO: not implemented in low-level driver
err = ibv_modify_ah(ib_ah_p, &ah_attr)
if (err) {
- HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,("ibv_query_ah failed (%d)\n", err));
+ HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
+ ("ibv_query_ah failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_query_ah;
}
@@ -1527,7 +1554,8 @@
#endif
err_user_unsupported:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1585,19 +1613,24 @@
int err;
ib_api_status_t status = IB_SUCCESS;
struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
+#if DBG
+ struct ib_device *ib_dev = ib_ah_p->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
// destroy AV
err = ibv_destroy_ah( ib_ah_p );
if (err) {
- HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,("ibv_destroy_ah failed (%d)\n", err));
+ HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ibv_destroy_ah failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_destroy_ah;
}
err_destroy_ah:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -1710,7 +1743,8 @@
ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, context_p, p_umv_buf );
if (IS_ERR(ib_qp_p)) {
err = PTR_ERR(ib_qp_p);
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("ibv_create_qp failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
+ ("ibv_create_qp failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_create_qp;
}
@@ -1740,7 +1774,8 @@
end:
if (p_umv_buf && p_umv_buf->command)
p_umv_buf->status = status;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -1756,13 +1791,17 @@
#ifndef WIN_TO_BE_CHANGED
ib_api_status_t status;
+#if DBG
+ struct ib_device* ib_dev = ((struct ib_pd*)h_pd)->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
status = _create_qp( h_pd, port_num,
qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -2008,6 +2047,9 @@
#ifndef WIN_TO_BE_CHANGED
ib_api_status_t status;
+#if DBG
+ struct ib_device* ib_dev = ((struct ib_pd*)h_pd)->device;
+#endif
//NB: algorithm of mthca_alloc_sqp() requires port_num
// PRM states, that special pares are created in couples, so
// looks like we can put here port_num = 1 always
@@ -2018,7 +2060,8 @@
status = _create_qp( h_pd, port_num,
qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -2256,6 +2299,9 @@
struct ib_qp_attr qp_attr;
int qp_attr_mask;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+#if DBG
+ struct ib_device* ib_dev = ib_qp_p->device;
+#endif
HCA_ENTER(HCA_DBG_QP);
@@ -2315,7 +2361,8 @@
err_inval_params:
if (p_umv_buf && p_umv_buf->command)
p_umv_buf->status = status;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -2437,6 +2484,9 @@
ib_api_status_t status = IB_SUCCESS;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;
+#if DBG
+ struct ib_device *ib_dev = ib_qp_p->device;
+#endif
UNREFERENCED_PARAMETER(p_umv_buf);
@@ -2480,7 +2530,8 @@
status = IB_SUCCESS;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
@@ -2558,7 +2609,10 @@
ib_api_status_t status;
int err;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
-
+#if DBG
+ struct ib_device* ib_dev = ib_qp_p->device;
+#endif
+
UNUSED_PARAM( timewait );
HCA_ENTER( HCA_DBG_QP);
@@ -2568,7 +2622,8 @@
err = ibv_destroy_qp( ib_qp_p );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_destroy_qp failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("ibv_destroy_qp failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_destroy_qp;
}
@@ -2576,7 +2631,8 @@
status = IB_SUCCESS;
err_destroy_qp:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -2593,7 +2649,8 @@
UNUSED_PARAM( timewait );
HCA_ENTER( HCA_DBG_QP);
- HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,("hca %d qp 0x%x\n", hca_idx, qp_num));
+ HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,
+ ("hca %d qp 0x%x\n", hca_idx, qp_num));
VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
hobul_p = mlnx_hobul_array[hca_idx];
@@ -2603,14 +2660,16 @@
}
qp_idx = qp_num & hobul_p->qp_idx_mask;
- HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",
+ HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP ,
+ ("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",
hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));
VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {
HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
- ("completes with ERROR status %s\n", ib_get_err_str(IB_INVALID_QP_HANDLE)));
+ ("completes with ERROR status %s\n"
+ IB_GET_ERR_STR(IB_INVALID_QP_HANDLE)));
return IB_SUCCESS; // Already freed
}
status = IB_INVALID_QP_HANDLE;
@@ -2711,6 +2770,10 @@
if( p_umv_buf && p_umv_buf->command ) {
+ context_p = (struct ib_ucontext *)h_ca;
+ hob_p = HOB_FROM_IBDEV(context_p->device);
+ ib_dev = context_p->device;
+
// sanity checks
if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
@@ -2718,10 +2781,6 @@
status = IB_INVALID_PARAMETER;
goto err_inval_params;
}
-
- context_p = (struct ib_ucontext *)h_ca;
- hob_p = HOB_FROM_IBDEV(context_p->device);
- ib_dev = context_p->device;
}
else {
hob_p = (mlnx_hob_t *)h_ca;
@@ -2756,7 +2815,8 @@
err_inval_params:
if (p_umv_buf && p_umv_buf->command)
p_umv_buf->status = status;
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
@@ -3083,6 +3143,9 @@
ib_api_status_t status;
int err;
struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;
+#if DBG
+ struct ib_device* ib_dev = ib_cq_p->device;
+#endif
HCA_ENTER( HCA_DBG_QP);
@@ -3092,7 +3155,8 @@
// destroy CQ
err = ibv_destroy_cq( ib_cq_p );
if (err) {
- HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,("ibv_destroy_cq failed (%d)\n", err));
+ HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ibv_destroy_cq failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_destroy_cq;
}
@@ -3100,7 +3164,8 @@
status = IB_SUCCESS;
err_destroy_cq:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
Index: hw/mthca/kernel/hca_mcast.c
===================================================================
--- hw/mthca/kernel/hca_mcast.c (revision 256)
+++ hw/mthca/kernel/hca_mcast.c (working copy)
@@ -58,14 +58,17 @@
int err;
ib_api_status_t status;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+#if DBG
+ struct ib_device *ib_dev = ib_qp_p->device;
+#endif
mlnx_mcast_t *mcast_p;
HCA_ENTER(HCA_DBG_SHIM);
// sanity checks
if( p_umv_buf && p_umv_buf->command ) {
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("User mode is not supported yet\n"));
- HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("User mode is not supported yet\n"));
status = IB_UNSUPPORTED;
goto err_user_unsupported;
}
@@ -88,7 +91,8 @@
else {
err = ibv_attach_mcast(ib_qp_p, (union ib_gid *)p_mcast_gid, (u16)mcast_lid);
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_attach_mcast failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
+ ("ibv_attach_mcast failed (%d)\n", err));
status = errno_to_iberr(err);
goto err_attach;
}
@@ -115,7 +119,8 @@
err_invalid_param:
err_user_unsupported:
end:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -123,15 +128,21 @@
mlnx_detach_mcast (
IN const ib_mcast_handle_t h_mcast)
{
- ib_api_status_t status;
+ ib_api_status_t status = IB_INVALID_PARAMETER;
int err;
mlnx_mcast_t *mcast_p = (mlnx_mcast_t*)h_mcast;
+ struct ib_device *ib_dev;
// sanity check
- if (!mcast_p || !mcast_p->ib_qp_p) {
- status = IB_INVALID_PARAMETER;
- goto err_invalid_param;
+ if (!mcast_p || !mcast_p->ib_qp_p)
+ {
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status IB_INVALID_PARAMETER\n"));
+ return IB_INVALID_PARAMETER;
}
+
+ ib_dev = mcast_p->ib_qp_p->device;
+
HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n",
mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,
*(uint64_t*)&mcast_p->mcast_gid.raw[0],
@@ -155,8 +166,8 @@
err_detach_mcast:
kfree(mcast_p);
-err_invalid_param:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
Index: hw/mthca/kernel/hca_smp.c
===================================================================
--- hw/mthca/kernel/hca_smp.c (revision 256)
+++ hw/mthca/kernel/hca_smp.c (working copy)
@@ -530,7 +530,7 @@
int err;
ib_api_status_t status = IB_SUCCESS;
mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;
- struct ib_device *ib_dev_p = IBDEV_FROM_HOB( hob_p );
+ struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
//TODO: do we need use flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?
int mad_flags = 0;
struct _ib_wc *wc_p;
@@ -573,7 +573,7 @@
// process mad
if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) )
{
- err = mthca_process_mad(ib_dev_p, mad_flags, (uint8_t)port_num,
+ err = mthca_process_mad(ib_dev, mad_flags, (uint8_t)port_num,
wc_p, grh_p, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);
if (!err) {
HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD,
@@ -591,7 +591,8 @@
err_process_mad:
err_port_num:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MAD ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MAD,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
#else
Index: hw/mthca/kernel/hca_driver.h
===================================================================
--- hw/mthca/kernel/hca_driver.h (revision 256)
+++ hw/mthca/kernel/hca_driver.h (working copy)
@@ -160,6 +160,7 @@
#define HOB_FROM_IBDEV(dev_p) (mlnx_hob_t *)&dev_p->mdev->ext->hca.hob
+#define IB_GET_ERR_STR ib_dev->mdev->ext->ci_ifc.get_err_str
/***********************************
Index: hw/mthca/kernel/hca_direct.c
===================================================================
--- hw/mthca/kernel/hca_direct.c (revision 256)
+++ hw/mthca/kernel/hca_direct.c (working copy)
@@ -61,16 +61,17 @@
int err;
ib_api_status_t status;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
- struct ib_device *ib_dev_p = ib_qp_p->device;
-
+ struct ib_device *ib_dev = ib_qp_p->device;
+
HCA_ENTER(HCA_DBG_QP);
// sanity checks
// create CQ
- err = ib_dev_p->post_send(ib_qp_p, p_send_wr, pp_failed );
+ err = ib_dev->post_send(ib_qp_p, p_send_wr, pp_failed );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("post_send failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
+ ("post_send failed (%d)\n", err));
if (err == -ENOMEM)
status = IB_INSUFFICIENT_RESOURCES;
else
@@ -81,7 +82,8 @@
status = IB_SUCCESS;
err_post_send:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -96,16 +98,17 @@
int err;
ib_api_status_t status;
struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
- struct ib_device *ib_dev_p = ib_qp_p->device;
+ struct ib_device *ib_dev = ib_qp_p->device;
HCA_ENTER(HCA_DBG_QP);
// sanity checks
// create CQ
- err = ib_dev_p->post_recv(ib_qp_p, p_recv_wr, pp_failed );
+ err = ib_dev->post_recv(ib_qp_p, p_recv_wr, pp_failed );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("post_recv failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,
+ ("post_recv failed (%d)\n", err));
if (err == -ENOMEM)
status = IB_INSUFFICIENT_RESOURCES;
else
@@ -116,7 +119,8 @@
status = IB_SUCCESS;
err_post_recv:
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -145,7 +149,10 @@
int err;
ib_api_status_t status = IB_SUCCESS;
struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;
-
+#if DBG
+ struct ib_device *ib_dev = ib_cq_p->device;
+#endif
+
HCA_ENTER(HCA_DBG_CQ);
// sanity checks
@@ -157,14 +164,16 @@
// poll CQ
err = mthca_poll_cq_list(ib_cq_p, pp_free_wclist, pp_done_wclist );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("mthca_poll_cq_list failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,
+ ("mthca_poll_cq_list failed (%d)\n", err));
status = errno_to_iberr(err);
}else if (!*pp_done_wclist)
status = IB_NOT_FOUND;
err_invalid_params:
if (status != IB_NOT_FOUND){
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_CQ,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
}else
HCA_EXIT(HCA_DBG_CQ);
return status;
@@ -179,17 +188,22 @@
int err;
ib_api_status_t status = IB_SUCCESS;
struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;
-
+#if DBG
+ struct ib_device *ib_dev = ib_cq_p->device;
+#endif
+
HCA_ENTER(HCA_DBG_SHIM);
// REARM CQ
err = ib_req_notify_cq(ib_cq_p, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_req_notify_cq failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ib_req_notify_cq failed (%d)\n", err));
status = errno_to_iberr(err);
}
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
@@ -201,16 +215,21 @@
int err;
ib_api_status_t status = IB_SUCCESS;
struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;
+#if DBG
+ struct ib_device *ib_dev = ib_cq_p->device;
+#endif
HCA_ENTER(HCA_DBG_SHIM);
err = ib_req_ncomp_notif(ib_cq_p, n_cqes );
if (err) {
- HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_req_ncomp_notif failed (%d)\n", err));
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,
+ ("ib_req_ncomp_notif failed (%d)\n", err));
status = errno_to_iberr(err);
}
- HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM,
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
return status;
}
Index: hw/mthca/kernel/SOURCES
===================================================================
--- hw/mthca/kernel/SOURCES (revision 256)
+++ hw/mthca/kernel/SOURCES (working copy)
@@ -63,7 +63,6 @@
TARGETLIBS= \
$(TARGETPATH)\*\complib.lib \
- $(TARGETPATH)\*\ibal.lib \
$(DDK_LIB_PATH)\wdmguid.lib