[openib-general] [PATCH][kdapl] clean up async evd flow and init hca flow
Itamar
itamar at mellanox.co.il
Tue May 10 12:44:24 PDT 2005
clean up async evd flow: register the async event callbacks for every IA's async EVD and drop the per-HCA async_evd / async_error_evd bookkeeping.
clean up init hca flow: open and query the HCA once at provider registration time (dapl_module.c) instead of on every dapl_ia_open(), and remove the now unused dapl_hca_cleanup() helper.
Signed-off-by: Itamar Rabenstein <itamar at mellanox.co.il>
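For reviewers, here is a condensed sketch of the init path this patch moves into dapl_module.c. The helper name dapl_init_hca_sketch and its placement are illustrative assumptions; the two calls and their arguments are exactly the ones added in the hunk below.

#include "dapl.h"
#include "dapl_adapter_util.h"

/* Open and query the HCA once, at provider registration time,
 * instead of on every dapl_ia_open().  Hypothetical helper name;
 * error handling mirrors the dapl_module.c hunk below. */
static DAT_RETURN dapl_init_hca_sketch(DAPL_HCA *hca_ptr)
{
	DAT_RETURN dat_status;

	/* register with the HW */
	dat_status = dapl_ib_open_hca(hca_ptr->name, hca_ptr);
	if (dat_status != DAT_SUCCESS)
		return dat_status;

	/* cache IA attributes so later operations can be limited */
	return dapl_ib_query_hca(hca_ptr, &hca_ptr->ia_attr,
				 NULL, &hca_ptr->hca_address);
}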
Index: dapl_module.c
===================================================================
--- dapl_module.c (revision 2296)
+++ dapl_module.c (working copy)
@@ -147,6 +147,28 @@
goto bail;
}
+ dat_status = dapl_ib_open_hca(hca_ptr->name, hca_ptr);
+
+ if (dat_status != DAT_SUCCESS) {
+ dapl_dbg_log(DAPL_DBG_TYPE_ERR,
+ "dapl_ib_open_hca failed %x\n",
+ dat_status);
+ goto bail;
+ }
+ /*
+ * Obtain IA attributes from the HCA to limit certain
+ * operations.
+ */
+ dat_status = dapl_ib_query_hca(hca_ptr,
+ &hca_ptr->ia_attr,
+ NULL, &hca_ptr->hca_address);
+ if (dat_status != DAT_SUCCESS) {
+ dapl_dbg_log(DAPL_DBG_TYPE_ERR,
+ "dapl_ib_query_hca failed %x\n",
+ dat_status);
+ goto bail;
+ }
+
provider->extension = hca_ptr;
/* register providers with dat_registry */
Index: dapl_ia_util.c
===================================================================
--- dapl_ia_util.c (revision 2296)
+++ dapl_ia_util.c (working copy)
@@ -468,13 +468,6 @@
{
spin_lock(&hca_ptr->lock);
atomic_dec(&hca_ptr->handle_ref_count);
- if (atomic_read(&hca_ptr->handle_ref_count) == 0) {
- dapl_ib_close_hca(hca_ptr);
-#if 0
- hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
-#endif
- hca_ptr->async_evd = NULL;
- }
spin_unlock(&hca_ptr->lock);
}
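With the close-on-last-reference logic removed here, dropping an IA reference no longer closes the HCA; the HCA is expected to stay open for the lifetime of the provider, with the matching dapl_ib_close_hca() call living in the provider teardown path in dapl_module.c (not part of this patch). A minimal sketch, assuming that placement and a hypothetical helper name:

/* Hypothetical counterpart to the init change in dapl_module.c:
 * close the HCA once, at provider removal, now that dropping the
 * last IA reference no longer closes it. */
static void dapl_fini_hca_sketch(DAPL_HCA *hca_ptr)
{
	dapl_ib_close_hca(hca_ptr);
}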
Index: dapl_ia_open.c
===================================================================
--- dapl_ia_open.c (revision 2296)
+++ dapl_ia_open.c (working copy)
@@ -44,12 +44,6 @@
#include "dapl_adapter_util.h"
/*
- * LOCAL PROTOTYPES
- */
-
-void dapl_hca_cleanup(DAPL_HCA * hca_ptr, DAT_BOOLEAN dec_ref);
-
-/*
* Open a provider and return a handle. The handle enables the user
* to invoke operations on this provider.
*
@@ -59,9 +53,9 @@
*/
DAT_RETURN
dapl_ia_open(const DAT_NAME_PTR name,
- DAT_COUNT async_evd_qlen,
- DAT_EVD_HANDLE * async_evd_handle_ptr,
- DAT_IA_HANDLE * ia_handle_ptr)
+ DAT_COUNT async_evd_qlen,
+ DAT_EVD_HANDLE * async_evd_handle_ptr,
+ DAT_IA_HANDLE * ia_handle_ptr)
{
DAT_RETURN dat_status;
struct dat_provider *provider;
@@ -74,8 +68,8 @@
ia_ptr = NULL;
dapl_dbg_log(DAPL_DBG_TYPE_API,
- "dapl_ia_open (%s, %d, %p, %p)\n",
- name, async_evd_qlen, async_evd_handle_ptr, ia_handle_ptr);
+ "dapl_ia_open (%s, %d, %p, %p)\n",
+ name, async_evd_qlen, async_evd_handle_ptr, ia_handle_ptr);
dat_status = dapl_provider_list_search(name, &provider);
if (DAT_SUCCESS != dat_status) {
@@ -99,37 +93,9 @@
/* get the hca_ptr */
hca_ptr = (DAPL_HCA *) provider->extension;
- /*
- * Open the HCA if it has not been done before.
- */
- spin_lock(&hca_ptr->lock);
- /* register with the HW */
- dat_status = dapl_ib_open_hca(hca_ptr->name, hca_ptr);
- if (dat_status != DAT_SUCCESS) {
- dapl_dbg_log(DAPL_DBG_TYPE_ERR,
- "dapl_ib_open_hca failed %x\n",
- dat_status);
- spin_unlock(&hca_ptr->lock);
- goto bail;
- }
-
- /*
- * Obtain IA attributes from the HCA to limit certain
- * operations.
- * If using DAPL_ATS naming, ib_query_hca will also set the ip
- * address.
- */
- dat_status = dapl_ib_query_hca(hca_ptr,
- &hca_ptr->ia_attr,
- NULL, &hca_ptr->hca_address);
- if (dat_status != DAT_SUCCESS) {
- dapl_hca_cleanup(hca_ptr, DAT_FALSE);
- spin_unlock(&hca_ptr->lock);
- goto bail;
- }
-
/* Take a reference on the hca_handle */
+ spin_lock(&hca_ptr->lock);
atomic_inc(&hca_ptr->handle_ref_count);
spin_unlock(&hca_ptr->lock);
@@ -137,10 +103,10 @@
ia_ptr = dapl_ia_alloc(provider, hca_ptr);
if (!ia_ptr) {
spin_lock(&hca_ptr->lock);
- dapl_hca_cleanup(hca_ptr, DAT_TRUE);
+ atomic_dec(&hca_ptr->handle_ref_count);
spin_unlock(&hca_ptr->lock);
dat_status =
- DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+ DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
goto bail;
}
@@ -151,74 +117,52 @@
evd_ptr = (DAPL_EVD *) * async_evd_handle_ptr;
if (evd_ptr) {
if (DAPL_BAD_HANDLE(evd_ptr, DAPL_MAGIC_EVD) ||
- !(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) {
+ !(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) {
dat_status =
- DAT_ERROR(DAT_INVALID_HANDLE,
- DAT_INVALID_HANDLE_EVD_ASYNC);
+ DAT_ERROR(DAT_INVALID_HANDLE,
+ DAT_INVALID_HANDLE_EVD_ASYNC);
goto bail;
}
-
- /* InfiniBand allows only 1 asychronous event handler per HCA */
- /* (see InfiniBand Spec, release 1.1, vol I, section 11.5.2, */
- /* page 559). */
- /* */
- /* We only need to make sure that this EVD's CQ belongs to */
- /* the same HCA as is being opened. */
-
if (evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle !=
- hca_ptr->ib_hca_handle) {
+ hca_ptr->ib_hca_handle) {
dat_status =
- DAT_ERROR(DAT_INVALID_HANDLE,
- DAT_INVALID_HANDLE_EVD_ASYNC);
+ DAT_ERROR(DAT_INVALID_HANDLE,
+ DAT_INVALID_HANDLE_EVD_ASYNC);
goto bail;
}
-
ia_ptr->cleanup_async_error_evd = DAT_FALSE;
ia_ptr->async_error_evd = evd_ptr;
} else {
/* Verify we have >0 length, and let the provider check the size */
if (async_evd_qlen <= 0) {
dat_status =
- DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+ DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
goto bail;
}
dat_status = dapl_evd_internal_create(ia_ptr,
- NULL, /* CNO ptr */
- async_evd_qlen,
- DAT_EVD_ASYNC_FLAG,
- &evd_ptr);
+ NULL, /* CNO ptr */
+ async_evd_qlen,
+ DAT_EVD_ASYNC_FLAG,
+ &evd_ptr);
if (dat_status != DAT_SUCCESS) {
goto bail;
}
-
atomic_inc(&evd_ptr->evd_ref_count);
-
- spin_lock(&hca_ptr->lock);
- if (hca_ptr->async_evd != (DAPL_EVD *) 0) {
- spin_unlock(&hca_ptr->lock);
- } else {
- hca_ptr->async_evd = evd_ptr;
- spin_unlock(&hca_ptr->lock);
-
- /* Register the handlers associated with the async EVD. */
- dat_status = dapl_ia_setup_callbacks(ia_ptr, evd_ptr);
- if (dat_status != DAT_SUCCESS) {
- /* Assign the EVD so it gets cleaned up */
- ia_ptr->cleanup_async_error_evd = DAT_TRUE;
- ia_ptr->async_error_evd = evd_ptr;
- goto bail;
- }
- }
-
+ /* Register the handlers associated with the async EVD. */
+ dat_status = dapl_ia_setup_callbacks(ia_ptr, evd_ptr);
+ /* Assign the EVD so it gets cleaned up */
ia_ptr->cleanup_async_error_evd = DAT_TRUE;
ia_ptr->async_error_evd = evd_ptr;
+ if (dat_status != DAT_SUCCESS) {
+ goto bail;
+ }
}
dat_status = DAT_SUCCESS;
*ia_handle_ptr = ia_ptr;
*async_evd_handle_ptr = evd_ptr;
- bail:
+ bail:
if (dat_status != DAT_SUCCESS) {
if (ia_ptr) {
/* This will release the async EVD if needed. */
@@ -227,19 +171,8 @@
}
dapl_dbg_log(DAPL_DBG_TYPE_RTN,
- "dapl_ia_open () returns 0x%x\n", dat_status);
+ "dapl_ia_open () returns 0x%x\n", dat_status);
return dat_status;
}
-/*
- * Clean up partially allocated HCA stuff. Strictly to make cleanup
- * simple.
- */
-void dapl_hca_cleanup(DAPL_HCA * hca_ptr, DAT_BOOLEAN dec_ref)
-{
- dapl_ib_close_hca(hca_ptr);
- if (dec_ref == DAT_TRUE) {
- atomic_dec(&hca_ptr->handle_ref_count);
- }
-}
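Condensed, the async EVD handling in dapl_ia_open() after this patch reads roughly as follows when the caller does not supply an EVD (a reading aid only, abbreviated from the hunks above, not part of the patch):

/* create an async EVD for this IA and register the async callbacks
 * unconditionally, instead of only for the first IA opened on the HCA */
dat_status = dapl_evd_internal_create(ia_ptr, NULL /* CNO ptr */,
				      async_evd_qlen,
				      DAT_EVD_ASYNC_FLAG, &evd_ptr);
if (dat_status != DAT_SUCCESS)
	goto bail;
atomic_inc(&evd_ptr->evd_ref_count);

dat_status = dapl_ia_setup_callbacks(ia_ptr, evd_ptr);

/* assign the EVD before checking dat_status so the bail: path can
 * release it if callback setup failed */
ia_ptr->cleanup_async_error_evd = DAT_TRUE;
ia_ptr->async_error_evd = evd_ptr;
if (dat_status != DAT_SUCCESS)
	goto bail;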
Index: dapl.h
===================================================================
--- dapl.h (revision 2296)
+++ dapl.h (working copy)
@@ -211,8 +211,6 @@
spinlock_t lock;
DAPL_LLIST_HEAD ia_list_head; /* list of all open IAs */
atomic_t handle_ref_count; /* count of ia_opens on handle */
- DAPL_EVD *async_evd;
- DAPL_EVD *async_error_evd;
struct sockaddr_in6 hca_address; /* local address of HCA */
char *name; /* provider name */
ib_hca_handle_t ib_hca_handle;
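After this hunk the DAPL_HCA structure no longer tracks an async EVD at all; the async EVD is owned by each IA (ia_ptr->async_error_evd). For reference, the touched region of the structure ends up looking like this (fragment only; fields outside the hunk are unchanged in dapl.h):

	spinlock_t		lock;
	DAPL_LLIST_HEAD		ia_list_head;	  /* list of all open IAs */
	atomic_t		handle_ref_count; /* count of ia_opens on handle */
	struct sockaddr_in6	hca_address;	  /* local address of HCA */
	char			*name;		  /* provider name */
	ib_hca_handle_t		ib_hca_handle;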
--
Itamar