[openib-general] [PATCH] request/response matching in MAD code

Sean Hefty mshefty at ichips.intel.com
Thu Sep 30 12:16:28 PDT 2004


The following patch matches response MADs with their corresponding requests.  A response without a matching request is discarded, and a matched response is reported to the client before the send completion of its request.

Timeouts of request MADs are not yet handled.
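For anyone reading the patch below, the matching works by copying the 64-bit transaction ID (tid) out of the MAD header when the send is posted, and then, on a solicited receive, walking the agent's send list for a still-valid request with the same tid.  The fragment below is a rough, standalone userspace sketch of that flow; it is not the kernel code itself, and the struct and function names in it (send_req, handle_response) are simplified stand-ins for the ib_mad structures:

/*
 * Simplified sketch of TID-based request/response matching.
 * All types here are hypothetical stand-ins, not the ib_mad structures.
 */
#include <stdio.h>
#include <stdint.h>

struct send_req {
	uint64_t tid;		/* transaction ID copied from the MAD header */
	int timeout_ms;		/* nonzero while a response is still expected */
	int completed;
	struct send_req *next;
};

/* Walk the agent's outstanding-send list for a request with this TID. */
static struct send_req *find_send_req(struct send_req *list, uint64_t tid)
{
	for (; list; list = list->next)
		if (list->tid == tid && list->timeout_ms && !list->completed)
			return list;
	return NULL;			/* unmatched response: caller drops it */
}

/* Handle a received response: report it first, then complete the request. */
static void handle_response(struct send_req *list, uint64_t tid)
{
	struct send_req *req = find_send_req(list, tid);

	if (!req) {
		printf("tid 0x%llx: no matching request, discarding response\n",
		       (unsigned long long)tid);
		return;
	}
	req->timeout_ms = 0;		/* response arrived; disarm the timeout */
	printf("tid 0x%llx: response reported to receive handler\n",
	       (unsigned long long)tid);
	req->completed = 1;		/* request's send completion follows */
	printf("tid 0x%llx: request send completed\n",
	       (unsigned long long)tid);
}

int main(void)
{
	struct send_req b = { .tid = 2, .timeout_ms = 50 };
	struct send_req a = { .tid = 1, .timeout_ms = 50, .next = &b };

	handle_response(&a, 2);		/* matches request b */
	handle_response(&a, 7);		/* no match: discarded */
	return 0;
}

The real patch additionally holds the agent's send_list_lock around the lookup and routes the request's completion through ib_mad_complete_send_wr.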

- Sean

-- 
Index: access/ib_mad_priv.h
===================================================================
--- access/ib_mad_priv.h	(revision 915)
+++ access/ib_mad_priv.h	(working copy)
@@ -119,6 +119,7 @@
 	struct list_head agent_send_list;
 	struct ib_mad_agent *agent;
 	u64 wr_id;			/* client WRID */
+	u64 tid;
 	int timeout_ms;
 	int refcount;
 	enum ib_wc_status status;
Index: access/ib_mad.c
===================================================================
--- access/ib_mad.c	(revision 915)
+++ access/ib_mad.c	(working copy)
@@ -87,6 +87,8 @@
 static int ib_mad_post_receive_mads(struct ib_mad_port_private *priv);
 static inline u8 convert_mgmt_class(u8 mgmt_class);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
+static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+				    struct ib_mad_send_wc *mad_send_wc);
 
 /*
  * ib_register_mad_agent - Register to send/receive MADs
@@ -344,6 +346,8 @@
 			return -ENOMEM;	
 		}
 
+		mad_send_wr->tid = ((struct ib_mad_hdr*)(unsigned long)
+				   send_wr->sg_list->addr)->tid;
 		mad_send_wr->agent = mad_agent;
 		mad_send_wr->timeout_ms = cur_send_wr->wr.ud.timeout_ms;
 		if (mad_send_wr->timeout_ms)
@@ -740,6 +744,81 @@
 	return valid;
 }
 
+/*
+ * Return start of fully reassembled MAD, or NULL if the MAD isn't assembled yet.
+ */
+static struct ib_mad_private* reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
+					      struct ib_mad_private *recv)
+{
+	/* Until we have RMPP, all receives are reassembled!... */
+	return recv;
+}
+
+static struct ib_mad_send_wr_private*
+find_send_req(struct ib_mad_agent_private *mad_agent_priv,
+	      u64 tid)
+{
+	struct ib_mad_send_wr_private *mad_send_wr;
+
+	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
+			    agent_send_list) {
+
+		if (mad_send_wr->tid == tid) {
+			/* Verify request is still valid. */
+			if (mad_send_wr->status == IB_WC_SUCCESS &&
+			    mad_send_wr->timeout_ms)
+				return mad_send_wr;
+			else
+				return NULL;
+		}
+	}
+	return NULL;
+}
+
+static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
+				 struct ib_mad_private *recv,
+				 int solicited)
+{
+	struct ib_mad_send_wr_private *mad_send_wr;
+	struct ib_mad_send_wc mad_send_wc;
+	unsigned long flags;
+
+	/* Fully reassemble receive before processing. */
+	recv = reassemble_recv(mad_agent_priv, recv);
+	if (!recv)
+		return;
+
+	/* Complete corresponding request. */
+	if (solicited) {
+		spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags);
+		mad_send_wr = find_send_req(mad_agent_priv,
+					    recv->mad.mad.mad_hdr.tid);
+		if (!mad_send_wr) {
+			spin_unlock_irqrestore(&mad_agent_priv->send_list_lock,
+					       flags);
+			ib_free_recv_mad(&recv->header.recv_wc);
+			return;
+		}
+		mad_send_wr->timeout_ms = 0;
+		spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags);
+
+		/* Defined behavior is to complete response before request. */
+		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+						&recv->header.recv_wc);
+		atomic_dec(&mad_agent_priv->refcount);
+
+		mad_send_wc.status = IB_WC_SUCCESS;
+		mad_send_wc.vendor_err = 0;
+		mad_send_wc.wr_id = mad_send_wr->wr_id;
+		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+	} else {
+		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+						&recv->header.recv_wc);
+		if (atomic_dec_and_test(&mad_agent_priv->refcount))
+			wake_up(&mad_agent_priv->wait);
+	}
+}
+
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 				     struct ib_wc *wc)
 {
@@ -797,17 +876,10 @@
 
 	/* Setup MAD receive work completion from "normal" work completion */
 	recv->header.recv_wc.wc = wc;
-	recv->header.recv_wc.mad_len = sizeof(struct ib_mad); /* Should this be based on wc->byte_len ? Also, RMPP !!! */
+	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
 	recv->header.recv_wc.recv_buf = &recv->header.recv_buf;
-
-	/* Setup MAD receive buffer */
-	INIT_LIST_HEAD(&recv->header.recv_buf.list); /* More for RMPP!!! */
 	recv->header.recv_buf.mad = (struct ib_mad *)&recv->mad;
-	if (wc->wc_flags & IB_WC_GRH) {
-		recv->header.recv_buf.grh = &recv->grh;
-	} else {
-		recv->header.recv_buf.grh = NULL;
-	}
+	recv->header.recv_buf.grh = &recv->grh;
 
 	/* Validate MAD */
 	if (!validate_mad(recv->header.recv_buf.mad, qp_num))
@@ -820,21 +892,11 @@
 				   solicited);
 	if (!mad_agent) {
 		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
-		printk(KERN_ERR "No matching mad agent found for receive MAD\n");	
+		printk(KERN_NOTICE "No matching mad agent found for receive MAD\n");	
 	} else {
 		atomic_inc(&mad_agent->refcount);
 		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
-		if (solicited) {
-			/* Walk the send posted list to find the match !!! */
-			printk(KERN_DEBUG "Receive solicited MAD currently unsupported\n");
-		}
-
-		/* Invoke receive callback */	
-		mad_agent->agent.recv_handler(&mad_agent->agent,
-					      &recv->header.recv_wc);
-
-		if (atomic_dec_and_test(&mad_agent->refcount))
-			wake_up(&mad_agent->wait);
+		ib_mad_complete_recv(mad_agent, recv, solicited);
 	}
 
 ret:


