[openib-general][PATCH][kdapl]:

Guy German guyg at voltaire.com
Thu Aug 11 06:57:46 PDT 2005


James,
This patch allows the DAPL consumer to control the EVD upcall policy.
Some consumers (e.g. iSER) receive one upcall, disable the upcall
policy, and retrieve the rest of the events from a kernel thread via
dat_evd_dequeue. Working this way improves performance by avoiding the
context switch incurred on every upcall.
If the consumer does not work that way and instead leaves the upcall
policy enabled at all times (e.g. kdapltest), the behavior stays the
same and the consumer gets an upcall for each event.
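
For illustration, here is a minimal consumer-side sketch of that
pattern. It is not part of this patch: struct consumer_ctx,
consumer_handle_event, consumer_upcall and consumer_thread are
hypothetical names, and the dat_evd_modify_upcall/dat_evd_dequeue
calls are assumed to follow the kDAPL prototypes this patch works
against (plus the kDAPL headers for struct dat_evd, struct dat_event
and boolean_t):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct consumer_ctx {
	struct dat_evd *evd;
	struct dat_upcall_object upcall;  /* passed to dat_evd_modify_upcall */
	wait_queue_head_t wait;           /* init_waitqueue_head() at setup */
	int kick;
};

static void consumer_handle_event(struct consumer_ctx *ctx,
				  const struct dat_event *event)
{
	/* consumer-specific processing of one event goes here */
}

/* Upcall registered with the EVD (DAT_UPCALL_FUNC signature). */
static void consumer_upcall(void *instance_data,
			    const struct dat_event *event, boolean_t more)
{
	struct consumer_ctx *ctx = instance_data;

	/* The first event lands here; stop further upcalls ... */
	dat_evd_modify_upcall(ctx->evd, DAT_UPCALL_DISABLE, &ctx->upcall);
	consumer_handle_event(ctx, event);

	/* ... and let the kernel thread below dequeue the rest. */
	ctx->kick = 1;
	wake_up(&ctx->wait);
}

static int consumer_thread(void *arg)
{
	struct consumer_ctx *ctx = arg;
	struct dat_event event;

	while (!kthread_should_stop()) {
		wait_event_interruptible(ctx->wait,
				ctx->kick || kthread_should_stop());
		ctx->kick = 0;
		/* Dequeue directly: no per-event context switch. */
		while (dat_evd_dequeue(ctx->evd, &event) == 0)
			consumer_handle_event(ctx, &event);
	}
	return 0;
}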

Signed-off-by: Guy German <guyg at voltaire.com>

Index: dapl_evd.c
===================================================================
--- dapl_evd.c	(revision 3056)
+++ dapl_evd.c	(working copy)
@@ -38,28 +38,39 @@
 #include "dapl_ring_buffer_util.h"
 
 /*
- * DAPL Internal routine to trigger the specified CNO.
- * Called by the callback of some EVD associated with the CNO.
+ * DAPL Internal routine to trigger the callback of the EVD
  */
 static void dapl_evd_upcall_trigger(struct dapl_evd *evd)
 {
 	int status = 0;
 	struct dat_event event;
+	unsigned long flags;
 
-	/* Only process events if there is an enabled callback function. */
-	if ((evd->upcall.upcall_func == (DAT_UPCALL_FUNC) NULL) ||
-	    (evd->upcall_policy == DAT_UPCALL_DISABLE)) {
+	
+	/* This function is not re-entrant (change when implementing DAT_UPCALL_MANY) */
+	if (evd->is_triggered)
 		return;
-	}
 
-	for (;;) {
+	spin_lock_irqsave(&evd->common.lock, flags);
+	if (evd->is_triggered) {
+		spin_unlock_irqrestore(&evd->common.lock, flags);
+		return;
+	}
+	evd->is_triggered = 1;
+	spin_unlock_irqrestore(&evd->common.lock, flags);
+	/* Only process events if there is an enabled callback function */
+	while ((evd->upcall.upcall_func != (DAT_UPCALL_FUNC)NULL) &&
+	       (evd->upcall_policy != DAT_UPCALL_TEARDOWN) &&
+	       (evd->upcall_policy != DAT_UPCALL_DISABLE)) {
 		status = dapl_evd_dequeue((struct dat_evd *)evd, &event);
-		if (0 != status) 
-			return;
-
+		if (status)
+			break;
 		evd->upcall.upcall_func(evd->upcall.instance_data, &event,
 					FALSE);
 	}
+	evd->is_triggered = 0;
+
+	return;
 }
 
 static void dapl_evd_eh_print_wc(struct ib_wc *wc)
@@ -820,24 +831,19 @@ static void dapl_evd_dto_callback(struct
 	 * This function does not dequeue from the CQ; only the consumer
 	 * can do that. Instead, it wakes up waiters if any exist.
 	 * It rearms the completion only if completions should always occur
-	 * (specifically if a CNO is associated with the EVD and the
-	 * EVD is enabled).
 	 */
-
-	if (state == DAPL_EVD_STATE_OPEN && 
-	    evd->upcall_policy != DAT_UPCALL_DISABLE) {
-		/*
-		 * Re-enable callback, *then* trigger.
-		 * This guarantees we won't miss any events.
-		 */
-		status = ib_req_notify_cq(evd->cq, IB_CQ_NEXT_COMP);
-		if (0 != status) 
-			(void)dapl_evd_post_async_error_event(
-				evd->common.owner_ia->async_error_evd,
-				DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR,
-				evd->common.owner_ia);
-
+	
+	if (state == DAPL_EVD_STATE_OPEN) {
 		dapl_evd_upcall_trigger(evd);
+		if ((evd->upcall_policy != DAT_UPCALL_TEARDOWN) &&
+		    (evd->upcall_policy != DAT_UPCALL_DISABLE)) {
+			status = ib_req_notify_cq(evd->cq, IB_CQ_NEXT_COMP);
+			if (0 != status) 
+				(void)dapl_evd_post_async_error_event(
+					evd->common.owner_ia->async_error_evd,
+					DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR,
+					evd->common.owner_ia);
+		}
 	}
 	dapl_dbg_log(DAPL_DBG_TYPE_RTN, "%s() returns\n", __func__);
 }
@@ -890,7 +896,7 @@ int dapl_evd_internal_create(struct dapl
 
 		/* reset the qlen in the attributes, it may have changed */
 		evd->qlen = evd->cq->cqe;
-
+		evd->is_triggered = 0;
 		status = ib_req_notify_cq(evd->cq, IB_CQ_NEXT_COMP);
 
 		if (status != 0)
@@ -1035,15 +1041,41 @@ int dapl_evd_modify_upcall(struct dat_ev
 			   const struct dat_upcall_object *upcall)
 {
 	struct dapl_evd *evd;
-
-	dapl_dbg_log(DAPL_DBG_TYPE_API, "dapl_modify_upcall(%p)\n", evd_handle);
+	int status = 0;
+	int pending_events;
+	unsigned long flags;
 
 	evd = (struct dapl_evd *)evd_handle;
+	dapl_dbg_log(DAPL_DBG_TYPE_API, "%s: (evd=%p) set to %d\n",
+			__func__, evd_handle, upcall_policy);
 
+	spin_lock_irqsave(&evd->common.lock, flags);
+	if ((upcall_policy != DAT_UPCALL_TEARDOWN) &&
+	    (upcall_policy != DAT_UPCALL_DISABLE)) {
+		pending_events = dapl_rbuf_count(&evd->pending_event_queue);
+		if (pending_events) {
+			dapl_dbg_log(DAPL_DBG_TYPE_WARN,
+				"%s: (evd %p) there are still %d pending "
+				"events in the queue - policy stays disabled\n",
+				__func__, evd_handle, pending_events);
+			status = -EBUSY;
+			goto bail;
+		}
+		if (evd->evd_flags & DAT_EVD_DTO_FLAG) {
+			status = ib_req_notify_cq(evd->cq, IB_CQ_NEXT_COMP);
+			if (status) {
+				printk(KERN_ERR "%s: ib_req_notify_cq"
+					" failed (status=0x%x)\n", __func__,
+					status);
+				goto bail;
+			}
+		}
+	}
 	evd->upcall_policy = upcall_policy;
 	evd->upcall = *upcall;
-
-	return 0;
+bail:
+	spin_unlock_irqrestore(&evd->common.lock, flags);
+	return status;
 }
 
 int dapl_evd_post_se(struct dat_evd *evd_handle, const struct dat_event *event)
@@ -1076,7 +1108,7 @@ int dapl_evd_post_se(struct dat_evd *evd
 					      event->event_data.
 					      software_event_data.pointer);
 
-      bail:
+bail:
 	return status;
 }
 
@@ -1124,7 +1156,7 @@ int dapl_evd_dequeue(struct dat_evd *evd
 	}
 
 	spin_unlock_irqrestore(&evd->common.lock, evd->common.flags);
-      bail:
+bail:
 	dapl_dbg_log(DAPL_DBG_TYPE_RTN,
 		     "dapl_evd_dequeue () returns 0x%x\n", status);
 

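One behavioral note on the dapl_evd_modify_upcall change above:
re-enabling upcalls now fails with -EBUSY while events are still
queued, so a consumer switching back from polled mode should drain
and retry, roughly as below. Again a hypothetical sketch, reusing the
consumer_* names from the earlier example; DAT_UPCALL_SINGLE_INSTANCE
is the assumed re-enable policy:

static int consumer_enable_upcalls(struct consumer_ctx *ctx)
{
	struct dat_event event;
	int status;

	do {
		/* drain whatever is already queued */
		while (dat_evd_dequeue(ctx->evd, &event) == 0)
			consumer_handle_event(ctx, &event);
		/* retry if events slipped in before the modify call */
		status = dat_evd_modify_upcall(ctx->evd,
					       DAT_UPCALL_SINGLE_INSTANCE,
					       &ctx->upcall);
	} while (status == -EBUSY);

	return status;
}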

