[openib-general] [PATCH] SMI update

Sean Hefty mshefty at ichips.intel.com
Thu Sep 16 19:14:38 PDT 2004


After spending a couple of days floundering in SMI-related code, I finally gave in and studied that part of the spec, comparing it against the existing implementations.  I tried to separate the SMI requirements into send and receive handling of SMPs.  There's some pseudo-code near the end of ib_smi.c (to be converted into real code) that describes how the SMI checks will eventually work as the code is merged with the ib_mad.c routines.
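
In case it helps with review, here's a rough user-space sketch of how I read the outgoing (C14-9) checks in smi_handle_dr_smp_send() below.  The struct and constants are made-up stand-ins for ib_smp and the kernel definitions (host byte order, no mgmt class or LID-routed handling), so treat it as an illustration of the hop pointer rules rather than real SMA code:

/*
 * Rough user-space sketch of the outgoing (C14-9) directed route checks.
 * "dr_smp" is a hypothetical, stripped-down stand-in for struct ib_smp;
 * only the fields the hop pointer rules need, in host byte order.
 */
#include <stdio.h>

#define PERMISSIVE_LID	0xFFFF
#define NODE_CA		1
#define NODE_SWITCH	2

struct dr_smp {
	unsigned char	hop_ptr;
	unsigned char	hop_cnt;
	unsigned short	dr_dlid;
	unsigned char	initial_path[64];
};

/*
 * Mirrors smi_handle_dr_smp_send() for the outgoing direction only.
 * Returns 0 if the SMP should be discarded.
 */
static int dr_smp_send_ok(struct dr_smp *smp, int node_type, int port_num)
{
	unsigned char hop_ptr = smp->hop_ptr, hop_cnt = smp->hop_cnt;

	/* C14-9:1 -- first hop out of the initiating node */
	if (hop_cnt && hop_ptr == 0) {
		smp->hop_ptr++;
		return smp->initial_path[smp->hop_ptr] == port_num;
	}

	/* C14-9:2 -- intermediate hop, only legal on a switch */
	if (hop_ptr && hop_ptr < hop_cnt) {
		if (node_type != NODE_SWITCH)
			return 0;
		smp->hop_ptr++;
		return smp->initial_path[smp->hop_ptr] == port_num;
	}

	/* C14-9:3 -- end of the directed route segment */
	if (hop_ptr == hop_cnt) {
		smp->hop_ptr++;
		return node_type != NODE_CA ||
		       smp->dr_dlid == PERMISSIVE_LID;
	}

	/* C14-9:4 -- hop_ptr == hop_cnt + 1: give to the local SMA/SM */
	/* C14-9:5 -- anything beyond that is an unreasonable hop pointer */
	return hop_ptr == hop_cnt + 1;
}

int main(void)
{
	/* Two-hop route injected by a CA on port 1: initial_path[1] = 1 */
	struct dr_smp smp = {
		.hop_ptr = 0, .hop_cnt = 2,
		.dr_dlid = PERMISSIVE_LID,
		.initial_path = { 0, 1, 3 },
	};
	int ok = dr_smp_send_ok(&smp, NODE_CA, 1);

	printf("send from CA port 1: %s, hop_ptr now %d\n",
	       ok ? "ok" : "drop", smp.hop_ptr);
	return 0;
}

The returning (C14-13) direction and the non-directed-route classes follow the same pattern in the patch, so I left them out of the sketch.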

I'd appreciate comments from anyone, but particularly from someone who's worked on SMI code before, to make sure that I'm not completely off here.  (Btw, you can pretty much ignore the diffs for ib_smi.c; the previous code just got matched against the wrong function.)

- Sean

-- 
Index: access/ib_mad_priv.h
===================================================================
--- access/ib_mad_priv.h	(revision 859)
+++ access/ib_mad_priv.h	(working copy)
@@ -128,7 +128,7 @@
 	struct list_head port_list;
 	struct task_struct *mad_thread;
 	struct ib_device *device;
-	int port;
+	int port_num;
 	struct ib_qp *qp[IB_MAD_QPS_SUPPORTED];
 	struct ib_cq *cq;
 	struct ib_pd *pd;
Index: access/ib_smi.c
===================================================================
--- access/ib_smi.c	(revision 859)
+++ access/ib_smi.c	(working copy)
@@ -26,91 +26,293 @@
 #include <ib_smi.h>
 #include "ib_mad_priv.h"
 
-int smi_process_dr_smp(struct ib_mad_port_private *port_priv,
-		       struct ib_smp *smp)
+/*
+ * Fixup a directed route SMP for sending.  Return 0 if the SMP should be
+ * discarded.
+ */
+static int smi_handle_dr_smp_send(struct ib_mad_port_private *port_priv,
+				  struct ib_smp *smp)
 {
 	u8 hop_ptr, hop_cnt;
 
 	hop_ptr = smp->hop_ptr;
 	hop_cnt = smp->hop_cnt;
 
-	/*
-	 * Outgoing MAD processing.  "Outgoing" means from initiator to responder.
-	 * Section 14.2.2.2, Vol 1 IB spec
-	 */
+	 /* See section 14.2.2.2, Vol 1 IB spec */
 	if (!ib_get_smp_direction(smp)) {
 		/* C14-9:1 */
-		if (hop_ptr == 0 && hop_cnt)
-			return 0;
+		if (hop_cnt && hop_ptr == 0) {
+			smp->hop_ptr++;
+			return (smp->initial_path[smp->hop_ptr] == 
+				port_priv->port_num);
+		}
 
 		/* C14-9:2 */
 		if (hop_ptr && hop_ptr < hop_cnt) {
-			if (port_priv->device->node_type == IB_NODE_SWITCH) {
-				printk(KERN_NOTICE
-					"Need to handle DR Mad on switch\n");
-			}
-			return 0;
+			if (port_priv->device->node_type != IB_NODE_SWITCH)
+				return 0;
+			
+			/* smp->return_path set when received */
+			smp->hop_ptr++;
+			return (smp->initial_path[smp->hop_ptr] == 
+				port_priv->port_num);
 		}
 
 		/* C14-9:3 -- We're at the end of the DR segment of path */
 		if (hop_ptr == hop_cnt) {
-			if (hop_cnt)
-				smp->return_path[hop_ptr] = port_priv->port;
+			/* smp->return_path set when received */
 			smp->hop_ptr++;
+			return (port_priv->device->node_type != IB_NODE_CA ||
+				smp->dr_dlid == IB_LID_PERMISSIVE);
+		}
 
-			if (port_priv->device->node_type == IB_NODE_SWITCH) {
-				printk(KERN_NOTICE
-					"Need to handle DR Mad on switch\n");
-				return 0;
-			} else if (smp->dr_dlid != IB_LID_PERMISSIVE) {
+		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
+		/* C14-9:5 -- Fail unreasonable hop pointer. */
+		return (hop_ptr == hop_cnt + 1);
+
+	} else {
+		/* C14-13:1 */
+		if (hop_cnt && hop_ptr == hop_cnt + 1) {
+			smp->hop_ptr--;
+			return (smp->return_path[smp->hop_ptr] == 
+				port_priv->port_num);
+		}
+
+		/* C14-13:2 */
+		if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
+			if (port_priv->device->node_type != IB_NODE_SWITCH)
 				return 0;
-			}
 
-			return 1;
+			smp->hop_ptr--;
+			return (smp->return_path[smp->hop_ptr] == 
+				port_priv->port_num);
 		}
 
-		/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM. */
-		/* C14-9:5 -- Check for unreasonable hop pointer. */
-		if (hop_ptr > hop_cnt + 1)
+		/* C14-13:3 -- at the end of the DR segment of path */
+		if (hop_ptr == 1) {
+			smp->hop_ptr--;
+			/* C14-13:3 -- SMPs destined for SM shouldn't be here */
+			return (port_priv->device->node_type == IB_NODE_SWITCH &&
+				smp->dr_slid != IB_LID_PERMISSIVE);
+		}
+
+		/* C14-13:4 -- hop_ptr = 0 -> should have gone to SM. */
+		/* C14-13:5 -- Check for unreasonable hop pointer. */
+		return 0;
+	}
+}
+
+/*
+ * Sender side handling of outgoing SMPs.  Fixup the SMP as required by
+ * the spec.  Return 0 if the SMP should be dropped.
+ */
+static int smi_handle_smp_send(struct ib_mad_port_private *port_priv,
+			       struct ib_smp *smp)
+{
+	switch (smp->mgmt_class)
+	{
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		return smi_handle_dr_smp_send(port_priv, smp);
+	default:
+		return 0; /* write me... */
+	}
+}
+
+/*
+ * Return 1 if the SMP should be handled by the local SMA via process_mad.
+ */
+static inline int smi_check_local_smp(struct ib_mad_port_private *port_priv,
+				      struct ib_smp *smp)
+{
+	/* C14-9:3 -- We're at the end of the DR segment of path */
+	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM. */
+	return (port_priv->device->process_mad &&
+		!ib_get_smp_direction(smp) &&
+		(smp->hop_ptr == smp->hop_cnt + 1));
+}
+
+/*
+ * Adjust information for a received SMP.  Return 0 if the SMP should be
+ * dropped.
+ */
+static int smi_handle_dr_smp_recv(struct ib_mad_port_private *port_priv,
+				  struct ib_smp *smp)
+{
+	u8 hop_ptr, hop_cnt;
+
+	hop_ptr = smp->hop_ptr;
+	hop_cnt = smp->hop_cnt;
+
+	 /* See section 14.2.2.2, Vol 1 IB spec */
+	if (!ib_get_smp_direction(smp)) {
+		/* C14-9:1 -- sender should have incremented hop_ptr */
+		if (hop_cnt && hop_ptr == 0)
 			return 0;
 
-	} else {  /* Returning MAD (From responder to initiator) */
+		/* C14-9:2 -- intermediate hop */
+		if (hop_ptr && hop_ptr < hop_cnt) {
+			if (port_priv->device->node_type != IB_NODE_SWITCH)
+				return 0;
 
-		/* C14-13:1 */
+			smp->return_path[hop_ptr] = port_priv->port_num;
+			/* smp->hop_ptr updated when sending */
+			return 1; /*(smp->initial_path[hop_ptr+1] <=
+				port_priv->device->phys_port_cnt); */
+		}
+
+		/* C14-9:3 -- We're at the end of the DR segment of path */
+		if (hop_ptr == hop_cnt) {
+			if (hop_cnt)
+				smp->return_path[hop_ptr] = port_priv->port_num;
+			/* smp->hop_ptr updated when sending */
+
+			return (port_priv->device->node_type != IB_NODE_CA ||
+				smp->dr_dlid == IB_LID_PERMISSIVE);
+		}
+		
+		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
+		/* C14-9:5 -- fail unreasonable hop pointer. */
+		return (hop_ptr == hop_cnt + 1);
+
+	} else {
+
+		/* C14-13:1 -- sender should have decremented hop_ptr */
 		if (hop_cnt && hop_ptr == hop_cnt + 1)
 			return 0;
 
 		/* C14-13:2 */
 		if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
-			if (port_priv->device->node_type == IB_NODE_SWITCH) {
-				printk(KERN_NOTICE
-					"Need to handle DR Mad on switch\n");
-			}
-			return 0;
+			if (port_priv->device->node_type != IB_NODE_SWITCH)
+				return 0;
+
+			/* smp->hop_ptr updated when sending */
+			return 1; /*(smp->return_path[hop_ptr-1] <=
+				port_priv->device->phys_port_cnt); */
 		}
 
 		/* C14-13:3 -- We're at the end of the DR segment of path */
 		if (hop_ptr == 1) {
-			smp->hop_ptr--;
-
-			if (port_priv->device->node_type == IB_NODE_SWITCH) {
-				printk(KERN_NOTICE
-					"Need to handle DR Mad on switch\n");
-				return 0;
-			} else if (smp->dr_dlid != IB_LID_PERMISSIVE) {
-				return 0;
+			if (smp->dr_slid == IB_LID_PERMISSIVE) {
+				/* giving SMP to SM - update hop_ptr */
+				smp->hop_ptr--;
+				return 1;
 			}
+			/* smp->hop_ptr updated when sending */
+			return (port_priv->device->node_type != IB_NODE_CA);
+		}
+
+		/* C14-13:4 -- hop_ptr = 0 -> give to SM. */
+		/* C14-13:5 -- Check for unreasonable hop pointer. */
+		return (hop_ptr == 0);
+	}
+}
 
+/*
+ * Receive side handling SMPs.  Save receive information as required by
+ * the spec.  Return 0 if the SMP should be dropped.
+ */
+static int smi_handle_smp_recv(struct ib_mad_port_private *port_priv,
+			       struct ib_smp *smp)
+{
+	switch (smp->mgmt_class)
+	{
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		return smi_handle_dr_smp_recv(port_priv, smp);
+	default:
+		return 0; /* write me... */
+	}
+}
+
+/*
+ * Return 1 if the received DR SMP should be forwarded to the send queue.
+ * Return 0 if the SMP should be completed up the stack.
+ */
+static int smi_check_forward_dr_smp(struct ib_mad_port_private *port_priv,
+				    struct ib_smp *smp)
+{
+	u8 hop_ptr, hop_cnt;
+
+	hop_ptr = smp->hop_ptr;
+	hop_cnt = smp->hop_cnt;
+
+	if (!ib_get_smp_direction(smp)) {
+		/* C14-9:2 -- intermediate hop */
+		if (hop_ptr && hop_ptr < hop_cnt)
 			return 1;
-		}
 
-		/* C14-13:4 -- Hop Pointer = 0 -> give to SM. */
-		if (hop_ptr == 0)
+		/* C14-9:3 -- at the end of the DR segment of path */
+		if (hop_ptr == hop_cnt)
+			return (smp->dr_dlid == IB_LID_PERMISSIVE);
+
+		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM. */
+		if (hop_ptr == hop_cnt + 1)
+			return 1;
+	} else {
+		/* C14-13:2 */
+		if (2 <= hop_ptr && hop_ptr <= hop_cnt)
 			return 1;
 
-		/* C14-13:5 -- Check for unreasonable hop pointer. */
-		if (hop_ptr > hop_cnt + 1)
-			return 0;
+		/* C14-13:3 -- at the end of the DR segment of path */
+		if (hop_ptr == 1)
+			return (smp->dr_slid != IB_LID_PERMISSIVE);
+	}
+	return 0;
+}
+
+/*
+ * Return 1 if the received SMP should be forwarded to the send queue.
+ * Return 0 if the SMP should be completed up the stack.
+ */
+static int smi_check_forward_smp(struct ib_mad_port_private *port_priv,
+				 struct ib_smp *smp)
+{
+	switch (smp->mgmt_class)
+	{
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		return smi_check_forward_dr_smp(port_priv, smp);
+	default:
+		return 0; /* write me... */
+	}
+}
+
+/*
+static int smi_process_local(struct ib_mad_port_private *port_priv,
+			     struct ib_smp *smp)
+{
+	port_priv->device->process_mad( ... );
+}
+
+int smi_send_smp(struct ib_mad_port_private *port_priv,
+		 struct ib_smp *smp)
+{
+	if (!smi_handle_smp_send(port_priv, smp)) {
+		smi_fail_send()
+		return 0;
+	}
+
+	if (smi_check_local_smp(port_priv, smp)) {
+		smi_process_local(port_priv, smp);
+		return 0;
+	}
+
+	* Post the send on the QP *
+	return 1;
+}
+
+int smi_recv_smp(struct ib_mad_port_private *port_priv,
+		 struct ib_smp *smp)
+{
+	if (!smi_handle_smp_recv(port_priv, smp)) {
+		smi_fail_recv();
+		return 0;
+	}
+
+	if (smi_check_forward_smp(port_priv, smp)) {
+		smi_send_smp(port_priv, smp);
+		return 0;
 	}
+	
+	* Complete receive up stack *
 	return 1;
 }
+*/
Index: access/ib_mad.c
===================================================================
--- access/ib_mad.c	(revision 859)
+++ access/ib_mad.c	(working copy)
@@ -91,7 +91,7 @@
  * ib_register_mad_agent - Register to send/receive MADs
  */
 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
-					   u8 port,
+					   u8 port_num,
 					   enum ib_qp_type qp_type,
 					   struct ib_mad_reg_req *mad_reg_req,
 					   u8 rmpp_version,
@@ -150,7 +150,7 @@
 	/* Validate device and port */
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
-		if (entry->device == device && entry->port == port) {
+		if (entry->device == device && entry->port_num == port_num) {
 			port_priv = entry;
 			break;
 		}
@@ -372,7 +372,7 @@
 	 * Walk receive buffer list associated with this WC
 	 * No need to remove them from list of receive buffers
 	 */
-	list_for_each_entry(entry, &mad_recv_wc->recv_buf->list, list) {
+	list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
 		/* Free previous receive buffer */
 		kmem_cache_free(ib_mad_cache, buffer);
 		buffer = (void *)entry - sizeof(struct ib_mad_private_header);
@@ -909,7 +909,7 @@
 					       port_priv,
 					       "ib_mad-%-6s-%-2d",
 					       port_priv->device->name,
-					       port_priv->port);
+					       port_priv->port_num);
 	if (IS_ERR(port_priv->mad_thread)) {
 		printk(KERN_ERR "couldn't start mad thread\n");
 		return 1;
@@ -1068,7 +1068,7 @@
 /*
  * Modify QP into Init state
  */
-static inline int ib_mad_change_qp_state_to_init(struct ib_qp *qp, int port)
+static inline int ib_mad_change_qp_state_to_init(struct ib_qp *qp, int port_num)
 {
 	int ret;
 	struct ib_qp_attr *attr = NULL;
@@ -1087,7 +1087,7 @@
 	 * one is needed for the Reset to Init transition.
 	 */
 	attr->pkey_index = 0;
-	attr->port_num = port;
+	attr->port_num = port_num;
 	/* QKey is 0 for QP0 */
 	if (qp->qp_num == 0)
 		attr->qkey = 0;
@@ -1190,7 +1190,7 @@
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		ret = ib_mad_change_qp_state_to_init(port_priv->qp[i],
-						     port_priv->port);
+						     port_priv->port_num);
 		if (ret) {
 			printk(KERN_ERR "Could not change QP%d state to INIT\n", i);
 			return ret;
@@ -1259,7 +1259,7 @@
 	ret = ib_mad_port_start(port_priv);
 	if (ret) {
 		printk(KERN_ERR "Could not restart port%s/%d\n",
-			port_priv->device->name, port_priv->port);
+			port_priv->device->name, port_priv->port_num);
 	}	
 
 	return ret;
@@ -1269,7 +1269,7 @@
  * Open the port
  * Create the QP, PD, MR, and CQ if needed
  */
-static int ib_mad_port_open(struct ib_device *device, int port)
+static int ib_mad_port_open(struct ib_device *device, int port_num)
 {
 	int ret, cq_size, i;
 	u64 iova = 0;
@@ -1285,7 +1285,7 @@
 	/* First, check if port already open at MAD layer */
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
-		if (entry->device == device && entry->port == port) {
+		if (entry->device == device && entry->port_num == port_num) {
 			port_priv = entry;
 			break;
 		}
@@ -1306,7 +1306,7 @@
 	memset(port_priv, 0, sizeof *port_priv);
 	device->mad = port_priv;
 	port_priv->device = device;
-	port_priv->port = port;
+	port_priv->port_num = port_num;
 	spin_lock_init(&port_priv->reg_lock);
 	for (i = 0; i < MAX_MGMT_VERSION; i++) {
 		port_priv->version[i] = NULL;
@@ -1351,7 +1351,7 @@
 			qp_init_attr.qp_type = IB_QPT_SMI;
 		else
 			qp_init_attr.qp_type = IB_QPT_GSI;
-		qp_init_attr.port_num = port_priv->port;
+		qp_init_attr.port_num = port_priv->port_num;
 		port_priv->qp[i] = ib_create_qp(port_priv->pd, &qp_init_attr,
 						&qp_cap);
 		if (IS_ERR(port_priv->qp[i])) {
@@ -1414,14 +1414,14 @@
  * If there are no classes using the port, free the port 
  * resources (CQ, MR, PD, QP) and remove the port's info structure
  */
-static int ib_mad_port_close(struct ib_device *device, int port)
+static int ib_mad_port_close(struct ib_device *device, int port_num)
 {
 	struct ib_mad_port_private *entry, *port_priv = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
-		if (entry->device == device && entry->port == port) {
+		if (entry->device == device && entry->port_num == port_num) {
 			port_priv = entry;
 			break;
 		}
Index: include/ib_mad.h
===================================================================
--- include/ib_mad.h	(revision 859)
+++ include/ib_mad.h	(working copy)
@@ -207,7 +207,7 @@
 /**
  * ib_register_mad_agent - Register to send/receive MADs.
  * @device - The device to register with.
- * @port - The port on the specified device to use.
+ * @port_num - The port on the specified device to use.
  * @qp_type - Specifies which QP to access.  Must be either
  *   IB_QPT_SMI or IB_QPT_GSI.
  * @mad_reg_req - Specifies which unsolicited MADs should be received
@@ -223,7 +223,7 @@
  * @context - User specified context associated with the registration.
  */
 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
-					   u8 port,
+					   u8 port_num,
 					   enum ib_qp_type qp_type,
 					   struct ib_mad_reg_req *mad_reg_req,
 					   u8 rmpp_version,


