[openib-general] [PATCH] mad: Handle outgoing SMPs in ib_post_send_mad

Hal Rosenstock halr at voltaire.com
Fri Nov 5 10:49:39 PST 2004


mad: Handle outgoing SMPs in ib_post_send_mad
The MAD layer is now ready to support the SM :-)

I have not yet handled the additional special cases (hop count increment
done by the device, and using the send queue rather than process MAD for
0 hop SMPs).
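
For context, here is a rough sketch of what posting a directed route SMP
through this path looks like from the caller's side. It is illustrative
only: mad_agent is assumed to be an agent already registered on the SMI QP,
smp_dma_addr and mr_lkey are hypothetical stand-ins for the caller's mapped
buffer and registered memory, and the field names are assumed to match the
current ib_verbs.h / ib_mad.h, so check against the tree before relying on it:

	struct ib_smp smp;	/* real code: DMA-able buffer mapped for the HCA */
	struct ib_sge sge;
	struct ib_send_wr wr, *bad_wr;

	memset(&smp, 0, sizeof smp);
	smp.base_version  = IB_MGMT_BASE_VERSION;
	smp.mgmt_class    = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp.class_version = 1;
	smp.method        = IB_MGMT_METHOD_GET;
	/* ... tid, attr_id, hop_cnt, initial_path, dr_slid/dr_dlid ... */

	sge.addr   = smp_dma_addr;	/* hypothetical: DMA address of the SMP buffer */
	sge.length = sizeof smp;
	sge.lkey   = mr_lkey;		/* hypothetical: lkey of the registered region */

	memset(&wr, 0, sizeof wr);
	wr.wr_id         = (unsigned long) &smp;
	wr.sg_list       = &sge;
	wr.num_sge       = 1;
	wr.opcode        = IB_WR_SEND;
	wr.send_flags    = IB_SEND_SIGNALED;
	wr.wr.ud.mad_hdr = (struct ib_mad_hdr *) &smp;	/* checked by ib_post_send_mad */
	/* wr.wr.ud.ah / remote_qpn / remote_qkey as appropriate for the SMI QP */

	if (ib_post_send_mad(mad_agent, &wr, &bad_wr))
		/* error: bad_wr points at the failing WR */ ;

Note that for a locally consumed SMP the receive and send handlers are
invoked before ib_post_send_mad returns.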

Index: mad_priv.h
===================================================================
--- mad_priv.h	(revision 1161)
+++ mad_priv.h	(working copy)
@@ -115,6 +115,7 @@
 
 	atomic_t refcount;
 	wait_queue_head_t wait;
+	int phys_port_cnt;
 	u8 rmpp_version;
 };
 
Index: mad.c
===================================================================
--- mad.c	(revision 1162)
+++ mad.c	(working copy)
@@ -89,6 +89,7 @@
 static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 				    struct ib_mad_send_wc *mad_send_wc);
 static void timeout_sends(void *data);
+static int solicited_mad(struct ib_mad *mad);
 
 /*
  * Returns a ib_mad_port_private structure or NULL for a device/port.
@@ -243,6 +244,7 @@
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
 	mad_agent_priv->rmpp_version = rmpp_version;
+	mad_agent_priv->phys_port_cnt = port_priv->phys_port_cnt;
 	mad_agent_priv->agent.device = device;
 	mad_agent_priv->agent.recv_handler = recv_handler;
 	mad_agent_priv->agent.send_handler = send_handler;
@@ -368,6 +370,105 @@
 	spin_unlock_irqrestore(&mad_queue->lock, flags);
 }
 
+/*
+ * Return 0 if SMP is to be sent
+ * Return 1 if SMP was consumed locally (whether or not solicited)
+ * Return < 0 if error 
+ */
+static int handle_outgoing_smp(struct ib_mad_agent *mad_agent,
+			       struct ib_smp *smp,
+			       struct ib_send_wr *send_wr)
+{
+	int ret;
+
+	if (!smi_handle_dr_smp_send(smp,
+				    mad_agent->device->node_type,
+				    mad_agent->port_num)) {
+		ret = -EINVAL;
+		printk(KERN_ERR "Invalid directed route\n");
+		goto error1;
+	}
+	if (smi_check_local_dr_smp(smp,
+				   mad_agent->device,
+				   mad_agent->port_num)) {
+		struct ib_mad_private *mad_priv;
+		struct ib_mad_agent_private *mad_agent_priv;
+		struct ib_mad_send_wc mad_send_wc;
+
+		mad_priv = kmem_cache_alloc(ib_mad_cache,
+					    (in_atomic() || irqs_disabled()) ?
+					    GFP_ATOMIC : GFP_KERNEL);
+		if (!mad_priv) {
+			ret = -ENOMEM;
+			printk(KERN_ERR PFX "No memory for local response MAD\n");
+			goto error1;
+		}
+
+		mad_agent_priv = container_of(mad_agent,
+					      struct ib_mad_agent_private,
+					      agent);
+		ret = mad_agent->device->process_mad(mad_agent->device,
+						     0,
+						     mad_agent->port_num,
+						     smp->dr_slid, /* ? */
+						     (struct ib_mad *)smp,
+						     (struct ib_mad *)&mad_priv->mad);
+		if ((ret & IB_MAD_RESULT_SUCCESS) &&
+		    (ret & IB_MAD_RESULT_REPLY)) {
+			if (!smi_handle_dr_smp_recv((struct ib_smp *)&mad_priv->mad,
+						    mad_agent->device->node_type,
+						    mad_agent->port_num,
+						    mad_agent_priv->phys_port_cnt)) {
+				ret = -EINVAL;
+				kmem_cache_free(ib_mad_cache, mad_priv);
+				goto error1;
+			}
+		}
+
+		/* See if response is solicited and there is a recv handler */
+		if (solicited_mad(&mad_priv->mad.mad) && 
+		    mad_agent_priv->agent.recv_handler) {
+			struct ib_wc wc;
+
+			/* Defined behavior is to complete response before request */
+			wc.wr_id = send_wr->wr_id;
+			wc.status = IB_WC_SUCCESS;
+			wc.opcode = IB_WC_RECV;
+			wc.vendor_err = 0;
+			wc.byte_len = sizeof(struct ib_mad);
+			wc.src_qp = 0;	/* IB_QPT_SMI ? */
+			wc.wc_flags = 0;
+			wc.pkey_index = 0;
+			wc.slid = IB_LID_PERMISSIVE;
+			wc.sl = 0;
+			wc.dlid_path_bits = 0;
+			mad_priv->header.recv_wc.wc = &wc;
+			mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
+			INIT_LIST_HEAD(&mad_priv->header.recv_buf.list);
+			mad_priv->header.recv_buf.grh = NULL;
+			mad_priv->header.recv_buf.mad = &mad_priv->mad.mad;
+			mad_priv->header.recv_wc.recv_buf = &mad_priv->header.recv_buf;
+			mad_agent_priv->agent.recv_handler(mad_agent,
+							   &mad_priv->header.recv_wc);
+		} else
+			kmem_cache_free(ib_mad_cache, mad_priv);
+
+		if (mad_agent_priv->agent.send_handler) {
+			/* Now, complete send */
+			mad_send_wc.status = IB_WC_SUCCESS;
+			mad_send_wc.vendor_err = 0;
+			mad_send_wc.wr_id = send_wr->wr_id;
+			mad_agent_priv->agent.send_handler(mad_agent, &mad_send_wc);
+			ret = 1;
+		} else
+			ret = -EINVAL;
+	} else 
+		ret = 0;
+
+error1:
+	return ret;
+}
+
 static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
 		       struct ib_mad_send_wr_private *mad_send_wr,
 		       struct ib_send_wr *send_wr,
@@ -422,9 +523,27 @@
 	while (cur_send_wr) {
 		unsigned long			flags;
 		struct ib_mad_send_wr_private	*mad_send_wr;
+		struct ib_smp			*smp;
 
+		if (!cur_send_wr->wr.ud.mad_hdr) {
+			*bad_send_wr = cur_send_wr;
+			printk(KERN_ERR PFX "MAD header must be supplied in WR %p\n", cur_send_wr);
+			goto error1;
+		}
+
 		next_send_wr = (struct ib_send_wr *)cur_send_wr->next;
 
+		smp = (struct ib_smp *)cur_send_wr->wr.ud.mad_hdr;
+		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+			ret = handle_outgoing_smp(mad_agent, smp, cur_send_wr);
+			if (ret < 0) {	/* error */
+				*bad_send_wr = cur_send_wr;
+				goto error1;
+			} else if (ret == 1) {	/* locally consumed */
+				goto next;
+			} 
+		}
+
 		/* Allocate MAD send WR tracking structure */
 		mad_send_wr = kmalloc(sizeof *mad_send_wr, 
 				      (in_atomic() || irqs_disabled()) ?
@@ -467,7 +586,8 @@
 			atomic_dec(&mad_agent_priv->refcount);
 			return ret;		
 		}
-		cur_send_wr= next_send_wr;
+next:
+		cur_send_wr = next_send_wr;
 	}
 
 	return 0;	
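
One other behavioral note on the local path: as the comment in
handle_outgoing_smp says, the response is completed before the request, so
from the agent's point of view the callbacks fire in this order (handler
names below are made up; signatures are assumed to match the recv_handler /
send_handler prototypes in ib_mad.h):

static void sm_recv_handler(struct ib_mad_agent *agent,
			    struct ib_mad_recv_wc *recv_wc)
{
	/* Runs first for a locally consumed SMP, with the synthesized ib_wc
	 * (wr_id of the original send, slid == IB_LID_PERMISSIVE) and the
	 * response MAD produced by device->process_mad(). */
}

static void sm_send_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_wc *send_wc)
{
	/* Runs second; send_wc->status == IB_WC_SUCCESS and send_wc->wr_id
	 * matches the original work request. */
}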
