[openib-general] [PATCH][KDAPL] clean up code : evd flow
Itamar
itamar at mellanox.co.il
Thu May 5 07:37:53 PDT 2005
Clean up kdapl : evd flow
Signed-off-by: Itamar Rabenstein <itamar at mellanox.co.il>
Index: dapl_evd_dto_callb.c
===================================================================
--- dapl_evd_dto_callb.c (revision 2257)
+++ dapl_evd_dto_callb.c (working copy)
@@ -98,46 +98,26 @@ void dapl_evd_dto_callback(ib_cq_handle_
* EVD is enabled.
*/
- if (state == DAPL_EVD_STATE_WAITED) {
- /*
- * If we could, it would be best to avoid this wakeup
- * (and the context switch) unless the number of events/CQs
- * waiting for the waiter was its threshold. We don't
- * currently have the ability to determine that without
- * dequeueing the events, and we can't do that for
- * synchronization reasons (racing with the waiter waking
- * up and dequeuing, sparked by other callbacks).
- */
+ if (state == DAPL_EVD_STATE_OPEN && (evd_ptr->cno_ptr != NULL)) {
+ /*
+ * Re-enable callback, *then* trigger.
+ * This guarantees we won't miss any events.
+ */
+ dat_status = dapl_ib_completion_notify(evd_ptr,
+ IB_NOTIFY_ON_NEXT_COMP);
- /*
- * We don't need to worry about taking the lock for the
- * wakeup because wakeups are sticky.
- */
- dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
- } else if (state == DAPL_EVD_STATE_OPEN) {
- DAPL_CNO *cno = evd_ptr->cno_ptr;
- if (evd_ptr->evd_enabled && (evd_ptr->cno_ptr != NULL)) {
- /*
- * Re-enable callback, *then* trigger.
- * This guarantees we won't miss any events.
- */
- dat_status = dapl_ib_completion_notify(evd_ptr,
- IB_NOTIFY_ON_NEXT_COMP);
-
- if (DAT_SUCCESS != dat_status) {
- (void)dapl_evd_post_async_error_event(evd_ptr->
- header.
- owner_ia->
+ if (DAT_SUCCESS != dat_status) {
+ (void)dapl_evd_post_async_error_event(evd_ptr->
+ header.
+ owner_ia->
async_error_evd,
DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR,
(DAT_IA_HANDLE)
evd_ptr->
header.
owner_ia);
- }
-
- dapl_cno_trigger(cno, evd_ptr);
- }
- }
+ }
+ dapl_cno_trigger(evd_ptr->cno_ptr, evd_ptr);
+ }
dapl_dbg_log(DAPL_DBG_TYPE_RTN, "dapl_evd_dto_callback () returns\n");
}
Index: dapl_ep_create.c
===================================================================
--- dapl_ep_create.c (revision 2257)
+++ dapl_ep_create.c (working copy)
@@ -74,7 +74,6 @@ dapl_ep_create(DAT_IA_HANDLE ia_handle,
DAPL_IA *ia_ptr;
DAPL_EP *ep_ptr;
DAT_EP_ATTR ep_attr_limit;
- DAPL_EVD *evd_ptr;
DAT_RETURN dat_status;
dapl_dbg_log(DAPL_DBG_TYPE_API,
@@ -194,39 +193,6 @@ dapl_ep_create(DAT_IA_HANDLE ia_handle,
}
}
- /*
- * Verify the completion flags for the EVD and the EP
- */
- /*
- * XXX FIXME
- * XXX Need to make assign the EVD to the right completion type
- * XXX depending on the EP attributes. Fail if the types don't
- * XXX match, they are mutually exclusive.
- */
- evd_ptr = (DAPL_EVD *) recv_evd_handle;
- if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT) {
- if (ep_attr != NULL &&
- ep_attr->recv_completion_flags ==
- DAT_COMPLETION_DEFAULT_FLAG) {
- evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
- } else {
- evd_ptr->completion_type =
- ep_attr->recv_completion_flags;
- }
- }
-
- evd_ptr = (DAPL_EVD *) request_evd_handle;
- if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT) {
- if (ep_attr != NULL &&
- ep_attr->recv_completion_flags ==
- DAT_COMPLETION_DEFAULT_FLAG) {
- evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
- } else {
- evd_ptr->completion_type =
- ep_attr->recv_completion_flags;
- }
- }
-
/* Allocate EP */
ep_ptr = dapl_ep_alloc(ia_ptr, ep_attr);
if (ep_ptr == NULL) {
Index: dapl_openib_dto.h
===================================================================
--- dapl_openib_dto.h (revision 2257)
+++ dapl_openib_dto.h (working copy)
@@ -167,5 +167,6 @@ dapl_ib_post_send(DAPL_EP * ep_ptr,
#define DAPL_GET_CQE_OPTYPE(cqe_p) ((ib_work_completion_t *)cqe_p)->opcode
#define DAPL_GET_CQE_BYTESNUM(cqe_p) ((ib_work_completion_t *)cqe_p)->byte_len
#define DAPL_GET_CQE_STATUS(cqe_p) ((ib_work_completion_t *)cqe_p)->status
+#define DAPL_GET_CQE_VND_SYND(cqe_p) ((ib_work_completion_t *)cqe_p)->vendor_err
#endif /* DAPL_OPENIB_DTO_H */
Index: dapl_evd_resize.c
===================================================================
--- dapl_evd_resize.c (revision 2257)
+++ dapl_evd_resize.c (working copy)
@@ -93,13 +93,6 @@ DAT_RETURN dapl_evd_resize(DAT_EVD_HANDL
spin_lock(&evd_ptr->header.lock);
- /* Don't try to resize if we are actively waiting */
- if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED) {
- spin_unlock(&evd_ptr->header.lock);
- dat_status = DAT_ERROR(DAT_INVALID_STATE, 0);
- goto bail;
- }
-
pend_cnt = dapl_rbuf_count(&evd_ptr->pending_event_queue);
if (pend_cnt > evd_qlen) {
spin_unlock(&evd_ptr->header.lock);
Index: dapl_evd_kcreate.c
===================================================================
--- dapl_evd_kcreate.c (revision 2257)
+++ dapl_evd_kcreate.c (working copy)
@@ -96,16 +96,13 @@ DAT_RETURN dapl_evd_kcreate(DAT_IA_HANDL
goto bail;
}
- cno_ptr = dapl_cno_alloc (ia_ptr, upcall);
-
- if (!cno_ptr)
- {
- dat_status = DAT_INSUFFICIENT_RESOURCES;
- goto bail;
- }
-
- cno_ptr->cno_upcall_policy = upcall_policy;
+ cno_ptr = dapl_cno_alloc (ia_ptr, upcall);
+ if (!cno_ptr) {
+ dat_status = DAT_INSUFFICIENT_RESOURCES;
+ goto bail;
+ }
+ cno_ptr->cno_upcall_policy = upcall_policy;
dat_status = dapl_evd_internal_create(ia_ptr,
cno_ptr,
@@ -115,9 +112,6 @@ DAT_RETURN dapl_evd_kcreate(DAT_IA_HANDL
goto bail;
}
- evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;
- evd_ptr->evd_enabled = DAT_TRUE;
-
evdhandle = *evd_handle = (DAT_EVD_HANDLE) evd_ptr;
bail:
Index: dapl_openib_util.h
===================================================================
--- dapl_openib_util.h (revision 2257)
+++ dapl_openib_util.h (working copy)
@@ -89,22 +89,6 @@ typedef struct ib_hca_transport {
u16 lid;
} ib_hca_transport_t;
-/*
- * ib_shm_transport_t structure. Define fields specific to this
- * provider implementation necessary to maintain shared memory state
- *
- * OpenIB does not have an API to directly register memory as shared
- * memory. Instead, we must register memory, then register that memory
- * as shared. This gives us a new mr_handle, as well as a new l_key and
- * r_key, so we need to save the original handle until it can be
- * disposed of when the user does an lmr_free. The l_key and r_key are
- * not available and will disappear when we deallocate the mr_handle, so
- * we don't save them.
- */
-typedef struct ib_shm_transport {
- ib_mr_handle_t mr_handle;
-} ib_shm_transport_t;
-
#define IB_INVALID_HANDLE NULL
#define IB_MAX_REQ_PDATA_SIZE 92
Index: dapl_ep_create_with_srq.c
===================================================================
--- dapl_ep_create_with_srq.c (revision 2257)
+++ dapl_ep_create_with_srq.c (working copy)
@@ -81,7 +81,6 @@ dapl_ep_create_with_srq(DAT_IA_HANDLE ia
DAPL_IA *ia_ptr;
DAPL_EP *ep_ptr;
DAT_EP_ATTR ep_attr_limit;
- DAPL_EVD *evd_ptr;
DAT_RETURN dat_status;
dat_status = DAT_SUCCESS;
@@ -209,39 +208,6 @@ dapl_ep_create_with_srq(DAT_IA_HANDLE ia
}
}
- /*
- * Verify the completion flags for the EVD and the EP
- */
- /*
- * XXX FIXME
- * XXX Need to make assign the EVD to the right completion type
- * XXX depending on the EP attributes. Fail if the types don't
- * XXX match, they are mutually exclusive.
- */
- evd_ptr = (DAPL_EVD *) recv_evd_handle;
- if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT) {
- if (ep_attr != NULL &&
- ep_attr->recv_completion_flags ==
- DAT_COMPLETION_DEFAULT_FLAG) {
- evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
- } else {
- evd_ptr->completion_type =
- ep_attr->recv_completion_flags;
- }
- }
-
- evd_ptr = (DAPL_EVD *) request_evd_handle;
- if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT) {
- if (ep_attr != NULL &&
- ep_attr->recv_completion_flags ==
- DAT_COMPLETION_DEFAULT_FLAG) {
- evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
- } else {
- evd_ptr->completion_type =
- ep_attr->recv_completion_flags;
- }
- }
-
dat_status = DAT_NOT_IMPLEMENTED;
/*
Index: dapl_evd_util.c
===================================================================
--- dapl_evd_util.c (revision 2257)
+++ dapl_evd_util.c (working copy)
@@ -77,9 +77,6 @@ dapl_evd_internal_create(DAPL_IA * ia_pt
evd_ptr->evd_producer_locking_needed =
((evd_flags & ~(DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) != 0);
- /* Before we setup any callbacks, transition state to OPEN. */
- evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;
-
if (evd_flags & DAT_EVD_ASYNC_FLAG) {
/*
* There is no cq associate with async evd. Set it to invalid
@@ -161,19 +158,13 @@ DAPL_EVD *dapl_evd_alloc(DAPL_IA * ia_pt
/*
* Initialize the body
*/
- evd_ptr->evd_state = DAPL_EVD_STATE_INITIAL;
+ evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;
evd_ptr->evd_flags = evd_flags;
- evd_ptr->evd_enabled = DAT_TRUE;
- evd_ptr->evd_waitable = DAT_TRUE;
evd_ptr->evd_producer_locking_needed = 1; /* Conservative value. */
evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
atomic_set(&evd_ptr->evd_ref_count, 0);
evd_ptr->catastrophic_overflow = DAT_FALSE;
evd_ptr->qlen = qlen;
- evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD; /* FIXME: should be DAPL_EVD_STATE_INIT */
- dapl_os_wait_object_init(&evd_ptr->wait_object);
-
- evd_ptr->cno_active_count = 0;
evd_ptr->cno_ptr = cno_ptr;
bail:
@@ -219,10 +210,6 @@ DAT_RETURN dapl_evd_event_alloc(DAPL_EVD
event_ptr++;
}
- evd_ptr->cq_notified = DAT_FALSE;
- evd_ptr->cq_notified_when = 0;
- evd_ptr->threshold = 0;
-
bail:
return dat_status;
}
@@ -331,7 +318,6 @@ DAT_RETURN dapl_evd_dealloc(DAPL_EVD * e
kfree(evd_ptr->events);
}
- dapl_os_wait_object_destroy(&evd_ptr->wait_object);
kfree(evd_ptr);
bail:
@@ -445,73 +431,20 @@ static void dapl_evd_post_event(DAPL_EVD
(void *)event_ptr);
dapl_os_assert(dat_status == DAT_SUCCESS);
- dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
- || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);
+ dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);
if (evd_ptr->evd_state == DAPL_EVD_STATE_OPEN) {
/* No waiter. Arrange to trigger a CNO if it exists. */
-
- if (evd_ptr->evd_enabled) {
- cno_to_trigger = evd_ptr->cno_ptr;
- }
-
- if (evd_ptr->evd_producer_locking_needed) {
- spin_unlock(&evd_ptr->header.lock);
- }
- } else {
- /*
- * We're in DAPL_EVD_STATE_WAITED. Take the lock if
- * we don't have it, recheck, and signal.
- */
- if (!evd_ptr->evd_producer_locking_needed) {
- spin_lock(&evd_ptr->header.lock);
- }
-
- if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
- && (dapl_rbuf_count(&evd_ptr->pending_event_queue)
- >= evd_ptr->threshold)) {
- spin_unlock(&evd_ptr->header.lock);
- dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
- } else {
- spin_unlock(&evd_ptr->header.lock);
- }
+ cno_to_trigger = evd_ptr->cno_ptr;
+ if (evd_ptr->evd_producer_locking_needed) {
+ spin_unlock(&evd_ptr->header.lock);
+ }
}
-
if (cno_to_trigger != NULL) {
dapl_cno_trigger (cno_to_trigger, evd_ptr);
}
}
-/*
- * Post the <event> to the evd. Do not do any wakeup processing.
- * This function should only be called if it is known that there are
- * no waiters that it is appropriate to wakeup on this EVD. An example
- * of such a situation is during internal dat_evd_wait() processing.
- *
- * If producer side locking is required, the EVD lock must be held upon
- * entry to this function.
- */
-
-static void
-dapl_evd_post_event_nosignal(DAPL_EVD * evd_ptr, const DAT_EVENT * event_ptr)
-{
- DAT_RETURN dat_status;
-
- dapl_dbg_log(DAPL_DBG_TYPE_EVD,
- "dapl_evd_post_event_nonsignal: Called with event # %x\n",
- event_ptr->event_number);
-
- dat_status = dapl_rbuf_add(&evd_ptr->pending_event_queue,
- (void *)event_ptr);
- dapl_os_assert(dat_status == DAT_SUCCESS);
-
- dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
- || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);
-
- if (evd_ptr->evd_producer_locking_needed) {
- spin_unlock(&evd_ptr->header.lock);
- }
-}
/*
* format an overflow event for posting
@@ -689,49 +622,6 @@ dapl_evd_post_software_event(DAPL_EVD *
}
/*
- * dapl_evd_post_generic_event
- *
- * Post a generic event type. Not used by all providers
- *
- * Input:
- * evd_ptr
- * event_number
- * data
- *
- * Output:
- * none
- *
- * Returns:
- * DAT_SUCCESS
- *
- */
-DAT_RETURN
-dapl_evd_post_generic_event(DAPL_EVD * evd_ptr,
- DAT_EVENT_NUMBER event_number,
- DAT_EVENT_DATA * data)
-{
- DAT_EVENT *event_ptr;
-
- event_ptr = dapl_evd_get_and_init_event(evd_ptr, event_number);
- /*
- * Note event lock may be held on successful return
- * to be released by dapl_evd_post_event(), if provider side locking
- * is needed.
- */
-
- if (event_ptr == NULL) {
- return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
- DAT_RESOURCE_MEMORY);
- }
-
- event_ptr->event_data = *data;
-
- dapl_evd_post_event(evd_ptr, event_ptr);
-
- return DAT_SUCCESS;
-}
-
-/*
* dapl_evd_cqe_to_event
*
* Convert a CQE into an event structure.
@@ -866,128 +756,13 @@ dapl_evd_cqe_to_event(DAPL_EVD * evd_ptr
}
} /* end switch */
- /*
- * Most error DTO ops result in disconnecting the EP. See
- * IBTA Vol 1.1, Chapter 10,Table 68, for expected effect on
- * state.
- */
- if ((dto_status != DAT_DTO_SUCCESS) &&
- (dto_status != DAT_DTO_ERR_FLUSHED)) {
-#ifdef CONN_EV_BROKEN_SUPPORTED
- DAPL_EVD *evd_ptr;
-
- /*
- * If we are connected, generate disconnect and generate an
- * event. We may be racing with other disconnect ops, so we
- * need to check. We may also be racing CM connection events,
- * requiring us to check for connection pending states too.
- */
- spin_lock(&ep_ptr->header.lock);
- if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED ||
- ep_ptr->param.ep_state ==
- DAT_EP_STATE_ACTIVE_CONNECTION_PENDING
- || ep_ptr->param.ep_state ==
- DAT_EP_STATE_PASSIVE_CONNECTION_PENDING
- || ep_ptr->param.ep_state ==
- DAT_EP_STATE_COMPLETION_PENDING) {
- ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
- spin_unlock(&ep_ptr->header.lock);
-
- /* Let the other side know we have disconnected */
- // cant call to dapl_ib_disconnect besause it is in interrupt context
- // and this call is blocking need to fix ???
- // (void) dapl_ib_disconnect (ep_ptr, DAT_CLOSE_ABRUPT_FLAG);
- printk("FIXME DAPL BUG %s %s %d \n", __FILE__,
- __FUNCTION__, __LINE__);
- /* ... and clean up the local side */
- evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle;
- if (evd_ptr != NULL) {
- dapl_evd_post_connection_event(evd_ptr,
- DAT_CONNECTION_EVENT_BROKEN,
- (DAT_HANDLE)
- ep_ptr, 0, 0);
- }
- } else {
- spin_unlock(&ep_ptr->header.lock);
- }
-
- dapl_dbg_log(DAPL_DBG_TYPE_DTO_COMP_ERR,
- " DTO completion ERROR: %d: op %#x (ep disconnected)\n",
- DAPL_GET_CQE_STATUS(cqe_ptr),
- DAPL_GET_CQE_OPTYPE(cqe_ptr));
-#else
- dapl_dbg_log(DAPL_DBG_TYPE_DTO_COMP_ERR,
- " DTO completion ERROR: %d: op %#x\n",
- DAPL_GET_CQE_STATUS(cqe_ptr),
- DAPL_GET_CQE_OPTYPE(cqe_ptr));
-#endif
- }
-}
-
-/*
- * dapl_evd_copy_cq
- *
- * Copy all entries on a CQ associated with the EVD onto that EVD
- * Up to caller to handle races, if any. Note that no EVD waiters will
- * be awoken by this copy.
- *
- * Input:
- * evd_ptr
- *
- * Output:
- * None
- *
- * Returns:
- * none
- *
- */
-void dapl_evd_copy_cq(DAPL_EVD * evd_ptr)
-{
- ib_work_completion_t cur_cqe;
- DAT_RETURN dat_status;
- DAT_EVENT *event;
-
- if (evd_ptr->ib_cq_handle == IB_INVALID_HANDLE) {
- /* Nothing to do if no CQ. */
- return;
- }
-
- while (1) {
- dat_status =
- dapl_ib_completion_poll(evd_ptr->header.owner_ia->hca_ptr,
- evd_ptr, &cur_cqe);
-
- if (dat_status != DAT_SUCCESS) {
- break;
- }
-
- /* For debugging. */
- dapl_evd_eh_print_cqe(&cur_cqe);
-
- /*
- * Can use DAT_DTO_COMPLETION_EVENT because dapl_evd_cqe_to_event
- * will overwrite.
- */
-
- event =
- dapl_evd_get_and_init_event(evd_ptr,
- DAT_DTO_COMPLETION_EVENT);
- if (event == NULL) {
- /* We've already attempted the overflow post; return. */
- return;
- }
-
- dapl_evd_cqe_to_event(evd_ptr, &cur_cqe, event);
-
- dapl_evd_post_event_nosignal(evd_ptr, event);
- }
-
- if (DAT_GET_TYPE(dat_status) != DAT_QUEUE_EMPTY) {
- dapl_dbg_log(DAPL_DBG_TYPE_EVD,
- "dapl_evd_copy_cq: dapl_ib_completion_poll returned 0x%x\n",
- dat_status);
- dapl_os_assert(!"Bad return from dapl_ib_completion_poll");
- }
+ if (dto_status != DAT_DTO_SUCCESS) {
+ dapl_dbg_log(DAPL_DBG_TYPE_DTO_COMP_ERR,
+ " DTO completion ERROR: status %d: op %#x vendor synd: 0x%x\n",
+ DAPL_GET_CQE_STATUS (cqe_ptr),
+ DAPL_GET_CQE_OPTYPE (cqe_ptr),
+ DAPL_GET_CQE_VND_SYND(cqe_ptr));
+ }
}
/*
Index: dapl_evd_util.h
===================================================================
--- dapl_evd_util.h (revision 2257)
+++ dapl_evd_util.h (working copy)
@@ -87,10 +87,6 @@ DAT_RETURN
dapl_evd_post_software_event(DAPL_EVD * evd_ptr,
DAT_EVENT_NUMBER event_number, DAT_PVOID pointer);
-DAT_RETURN
-dapl_evd_post_generic_event(DAPL_EVD * evd_ptr,
- DAT_EVENT_NUMBER event_number,
- DAT_EVENT_DATA * data);
/*************************************
* dapl internal callbacks functions *
@@ -115,8 +111,6 @@ extern void dapl_evd_cq_async_error_call
extern void dapl_evd_qp_async_error_callback(ib_error_record_t * cause_ptr,
void *context);
-extern void dapl_evd_copy_cq(DAPL_EVD * evd_ptr);
-
extern DAT_RETURN dapl_evd_cq_poll_to_event(DAPL_EVD * evd_ptr,
DAT_EVENT * event);
Index: dapl.h
===================================================================
--- dapl.h (revision 2257)
+++ dapl.h (working copy)
@@ -64,10 +64,7 @@ typedef enum dapl_magic {
} DAPL_MAGIC;
typedef enum dapl_evd_state {
- DAPL_EVD_STATE_TERMINAL,
- DAPL_EVD_STATE_INITIAL,
DAPL_EVD_STATE_OPEN,
- DAPL_EVD_STATE_WAITED,
DAPL_EVD_STATE_DEAD = 0xDEAD
} DAPL_EVD_STATE;
@@ -270,8 +267,6 @@ struct dapl_evd
DAPL_EVD_STATE evd_state;
DAT_EVD_FLAGS evd_flags;
- DAT_BOOLEAN evd_enabled; /* For attached CNO. */
- DAT_BOOLEAN evd_waitable; /* EVD state. */
/* Derived from evd_flags; see dapls_evd_internal_create. */
DAT_BOOLEAN evd_producer_locking_needed;
@@ -279,9 +274,6 @@ struct dapl_evd
/* Every EVD has a CQ unless it is a SOFTWARE_EVENT only EVD */
ib_cq_handle_t ib_cq_handle;
- /* Mellanox Specific completion handle for registration/de-registration */
- ib_comp_handle_t ib_comp_handle;
-
/* An Event Dispatcher cannot be freed while
* it is referenced elsewhere.
*/
@@ -295,20 +287,7 @@ struct dapl_evd
DAT_EVENT *events;
DAPL_RING_BUFFER free_event_queue;
DAPL_RING_BUFFER pending_event_queue;
-
- /* CQ Completions are not placed into 'deferred_events'
- ** rather they are simply left on the Completion Queue
- ** and the fact that there was a notification is flagged.
- */
- DAT_BOOLEAN cq_notified;
- DAPL_OS_TICKS cq_notified_when;
-
- DAT_COUNT cno_active_count;
DAPL_CNO *cno_ptr;
-
- DAPL_OS_WAIT_OBJECT wait_object;
- DAT_COUNT threshold;
- DAPL_EVD_COMPLETION completion_type;
};
/* uDAPL timer entry, used to queue timeouts */
--
Itamar
More information about the general
mailing list