[openib-general] [PATCH] ib_mad.c: Consolidate flags variables for spinlocks

Hal Rosenstock halr at voltaire.com
Sat Sep 11 11:25:04 PDT 2004


ib_mad.c: Consolidate flags variables for spinlocks
Also, fix a lock/unlock mismatch in the ib_mad_post_send error path,
and update the TODO list (OpenIB MAD and Old GSI sections)
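
For anyone skimming the diff: none of the critical sections in these
functions nest, so the saved-IRQ-state variable is dead between one
unlock and the next lock, and a single "flags" local can serve every
spin_lock_irqsave()/spin_unlock_irqrestore() pair in a function. A
minimal sketch of the idiom (not code from this patch; a_lock and
b_lock are hypothetical):

#include <linux/spinlock.h>

static spinlock_t a_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical locks */
static spinlock_t b_lock = SPIN_LOCK_UNLOCKED;

static void example(void)
{
	unsigned long flags;	/* one variable serves both locks */

	/* spin_lock_irqsave() stores the interrupt state in flags ... */
	spin_lock_irqsave(&a_lock, flags);
	/* ... critical section A ... */
	spin_unlock_irqrestore(&a_lock, flags);	/* ... and this consumes it */

	/* flags is dead here, so it can be reused for the next region */
	spin_lock_irqsave(&b_lock, flags);
	/* ... critical section B ... */
	spin_unlock_irqrestore(&b_lock, flags);
}

Nested regions would still need one flags variable per nesting level,
since the inner spin_lock_irqsave() would clobber the state saved by
the outer one.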

Index: ib_mad.c
===================================================================
--- ib_mad.c	(revision 783)
+++ ib_mad.c	(working copy)
@@ -115,8 +115,7 @@
 	struct ib_mad_mgmt_class_table *class;
 	struct ib_mad_mgmt_method_table *method;
 	int ret2;
-	unsigned long ib_mad_port_list_sflags;
-	unsigned long ib_mad_agent_list_sflags;	
+	unsigned long flags;
 	u8 mgmt_class;
 
 	/* Validate parameters */
@@ -155,14 +154,14 @@
 	}
 
 	/* Validate device and port */
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each(entry, head) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 	if (!priv) {
 		ret = ERR_PTR(-ENODEV);
 		goto error1;
@@ -218,9 +217,9 @@
 	mad_agent->hi_tid = ++ib_mad_client_id;
 
 	/* Add to mad agent list */
-	spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
 	list_add_tail((struct list_head *) mad_agent_priv, &ib_mad_agent_list);
-	spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_agent_list_lock, flags);
 
 	ret2 = add_mad_reg_req(mad_reg_req, mad_agent_priv);
 	if (ret2) {
@@ -232,14 +231,14 @@
 
 error3:
 	/* Remove from mad agent list */
-	spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
 	list_for_each(entry2, head2) {
 		if (entry2->agent == mad_agent_priv->agent) {
 			list_del((struct list_head *)entry2);
 			break;
 		}
 	}	
-	spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_agent_list_lock, flags);
 	kfree(reg_req);
 error2:
 	kfree(mad_agent);
@@ -256,9 +255,9 @@
 {
 	struct ib_mad_agent_private *entry,
 				    *head = (struct ib_mad_agent_private *)&ib_mad_agent_list;
-	unsigned long ib_mad_agent_list_sflags;
+	unsigned long flags;
 
-	spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
 	list_for_each(entry, head) {
 		if (entry->agent == mad_agent) {
 			remove_mad_reg_req(entry);
@@ -270,7 +269,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_agent_list_lock, flags);
 
 	return 0;
 }
@@ -289,7 +288,7 @@
 	struct ib_send_wr	wr;
 	struct ib_send_wr	*bad_wr;
 	struct ib_mad_send_wr_private *mad_send_wr;
-	unsigned long ib_mad_send_list_sflags;
+	unsigned long flags;
 
 	cur_send_wr = send_wr;
 	/* Validate supplied parameters */
@@ -328,17 +327,17 @@
 		wr.send_flags = IB_SEND_SIGNALED; /* cur_send_wr->send_flags ? */
 
 		/* Link send WR into posted send MAD list */
-		spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
+		spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
 		list_add_tail((struct list_head *)mad_send_wr,
 			      &((struct ib_mad_port_private *)mad_agent->device->mad)->send_posted_mad_list);
-		spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
+		spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
 
 		ret = ib_post_send(mad_agent->qp, &wr, &bad_wr);
 		if (ret) {
 			/* Unlink from posted send MAD list */
-			spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
+			spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
 			list_del((struct list_head *)send_wr);
-			spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
+			spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
 			*bad_send_wr = cur_send_wr;
 			printk(KERN_NOTICE "ib_mad_post_send failed\n");
 			return ret;		
@@ -547,10 +546,10 @@
 	struct ib_mad_private_header *entry,
 				     *head = (struct ib_mad_private_header *)&priv->recv_posted_mad_list;
 	struct ib_mad_private *recv = NULL;
-	unsigned long ib_mad_recv_list_sflags;
+	unsigned long flags;
 
 	/* Find entry on posted MAD receive list which corresponds to this completion */
-	spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_lock_irqsave(&priv->recv_list_lock, flags);
 	list_for_each(entry, head) {
 		if ((unsigned long)entry == wc->wr_id) {
 			recv = (struct ib_mad_private *)entry;	
@@ -559,7 +558,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 	if (!recv) {
 		printk(KERN_ERR "No matching posted receive WR 0x%Lx\n", wc->wr_id);
 	}
@@ -595,10 +594,10 @@
 {
 	struct ib_mad_send_wr_private *entry, *send_wr = NULL,
 				      *head = (struct ib_mad_send_wr_private *)&priv->send_posted_mad_list;
-	unsigned long ib_mad_send_list_sflags;
+	unsigned long flags;
 
 	/* Find entry on posted MAD send list which corresponds to this completion */
-	spin_lock_irqsave(&priv->send_list_lock, ib_mad_send_list_sflags);
+	spin_lock_irqsave(&priv->send_list_lock, flags);
 	list_for_each(entry, head) {
 		if (entry->wr_id == wc->wr_id) {
 			send_wr = entry;
@@ -607,7 +606,7 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&priv->send_list_lock, ib_mad_send_list_sflags);
+	spin_unlock_irqrestore(&priv->send_list_lock, flags);
 	if (!send_wr) {
 		printk(KERN_ERR "No matching posted send WR 0x%Lx\n", wc->wr_id);
 	} else {
@@ -746,7 +745,7 @@
 	struct ib_sge sg_list;
 	struct ib_recv_wr recv_wr;
 	struct ib_recv_wr *bad_recv_wr;
-	unsigned long ib_mad_recv_list_sflags;
+	unsigned long flags;
 
 	/* Allocate memory for receive MAD (and private header) */
 	mad_priv = kmalloc(sizeof *mad_priv, GFP_KERNEL);
@@ -772,18 +771,18 @@
 	recv_wr.wr_id = (unsigned long)mad_priv;
 
 	/* Link receive WR into posted receive MAD list */
-	spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_lock_irqsave(&priv->recv_list_lock, flags);
 	list_add_tail((struct list_head *)mad_priv, &priv->recv_posted_mad_list);
-	spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 
 	pci_unmap_addr_set(&mad_priv->header.buf, mapping, sg_list.addr);
 
 	/* Now, post receive WR */
 	if (ib_post_recv(qp, &recv_wr, &bad_recv_wr)) {
 		/* Unlink from posted receive MAD list */
-		spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+		spin_lock_irqsave(&priv->recv_list_lock, flags);
 		list_del((struct list_head *)mad_priv);
-		spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+		spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 
 		pci_unmap_single(priv->device->dma_device,
 				 pci_unmap_addr(&mad_priv->header.buf, mapping),
@@ -820,16 +819,16 @@
  */
 static void ib_mad_return_posted_recv_mads(struct ib_mad_port_private *priv)
 {
-	unsigned long ib_mad_recv_list_sflags;
+	unsigned long flags;
 
 	/* PCI mapping ? */
 
-	spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_lock_irqsave(&priv->recv_list_lock, flags);
 	while (!list_empty(&priv->recv_posted_mad_list)) {
 
 	}
 	INIT_LIST_HEAD(&priv->recv_posted_mad_list);
-	spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
+	spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 }
 
 /*
@@ -837,17 +836,17 @@
  */
 static void ib_mad_return_posted_send_mads(struct ib_mad_port_private *priv)
 {
-	unsigned long ib_mad_send_list_sflags;
+	unsigned long flags;
 
 	/* PCI mapping ? */
 
-	spin_lock_irqsave(&priv->send_list_lock, ib_mad_send_list_sflags);
+	spin_lock_irqsave(&priv->send_list_lock, flags);
 	while (!list_empty(&priv->send_posted_mad_list)) {
 		list_del(priv->send_posted_mad_list.next);
 		/* Call completion handler ? */
 	}
 	INIT_LIST_HEAD(&priv->send_posted_mad_list);
-	spin_unlock_irqrestore(&priv->send_list_lock, ib_mad_send_list_sflags);
+	spin_unlock_irqrestore(&priv->send_list_lock, flags);
 }
 
 /*
@@ -972,7 +971,7 @@
 static int ib_mad_port_start(struct ib_mad_port_private *priv)
 {
 	int ret, i;
-	unsigned long ib_mad_port_list_sflags;
+	unsigned long flags;
 
 	for (i = 0; i < 2; i++) {
 		ret = ib_mad_change_qp_state_to_init(priv->qp[i], priv->port);
@@ -1008,9 +1007,9 @@
 		}
 	}
 
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	priv->up = 1;
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 	return 0;
 error:
 	ib_mad_return_posted_recv_mads(priv);
@@ -1027,11 +1026,11 @@
 static void ib_mad_port_stop(struct ib_mad_port_private *priv)
 {
 	int i;
-	unsigned long ib_mad_port_list_sflags;
+	unsigned long flags;
 
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	priv->up = 0;
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 
 	for (i = 0; i < 2; i++) {
 		ib_mad_change_qp_state_to_reset(priv->qp[i]);
@@ -1074,17 +1073,17 @@
 	struct ib_qp_cap qp_cap;
 	struct ib_mad_port_private *entry, *priv = NULL,
 				     *head = (struct ib_mad_port_private *) &ib_mad_port_list;
-	unsigned long ib_mad_port_list_sflags;
+	unsigned long flags;
 
 	/* First, check if port already open at MAD layer */
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each(entry, head) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 	if (priv) {
 		printk(KERN_DEBUG "Port already open\n");
 		return 0;
@@ -1169,9 +1168,9 @@
 		goto error8;
 	}
 
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_add_tail((struct list_head *)priv, &ib_mad_port_list);
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 
 	return 0;
 
@@ -1200,9 +1199,9 @@
 {
 	struct ib_mad_port_private *entry, *priv = NULL,
 				     *head = (struct ib_mad_port_private *)&ib_mad_port_list;
-	unsigned long ib_mad_port_list_sflags;
+	unsigned long flags;
 
-	spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_for_each(entry, head) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
@@ -1212,12 +1211,12 @@
 
 	if (priv == NULL) {
 		printk(KERN_ERR "Port not found\n");
-		spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 		return -ENODEV;
 	}
 
 	list_del((struct list_head *)priv);
-	spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 
 	ib_mad_port_stop(priv);
 	ib_mad_thread_stop(priv);
Index: ib_mad_priv.h
===================================================================
--- ib_mad_priv.h	(revision 781)
+++ ib_mad_priv.h	(working copy)
@@ -133,6 +133,7 @@
 	spinlock_t recv_list_lock;
 	struct list_head send_posted_mad_list;
 	struct list_head recv_posted_mad_list;
+
 	struct ib_mad_thread_data thread_data;
 };
 
Index: TODO
===================================================================
--- TODO	(revision 781)
+++ TODO	(working copy)
@@ -1,5 +1,29 @@
-8/31/04
+9/11/04
 
+OpenIB MAD Layer
+
+Short Term
+Track count of posted sends and receives
+Support call of ib_mad_post_send from any context
+Receive list per QP rather than 1 receive list
+Fix list handling
+Use wait queue and wait_event rather than signals and semaphores
+Finish coding receive path
+
+Revisit
+Handle post send overruns
+PD per device rather than per port
+Use tasklets/softirq rather than process context
+
+Futures
+RMPP support
+Redirection support (including receive list per QP)
+Replace locking with RCU
+
+
+
+(Old) GSI (will be orphaned)
+
 Update API to proposed openib GSI interface (ib_mad.h)
 Makefile needs to use standard kbuild 
 Sync with latest ib_verbs.h when appropriate
@@ -10,5 +34,5 @@
 Add GRH support for RMPP (low priority)
 Static rate handling (low priority)
 
-Migrate from /proc to /sysfs (may only apply to old GSI)
+Migrate from /proc to /sysfs (only applies to original GSI)
 
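
P.S. On the "wait queue and wait_event" TODO item above: the intent is
roughly the standard kernel idiom sketched below. This is not code
from ib_mad; mad_wait, completions_pending, and both function names
are hypothetical.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(mad_wait);	/* hypothetical wait queue */
static int completions_pending;		/* hypothetical wakeup condition */

/* MAD thread: sleep until a completion needs processing */
static void mad_thread_body(void)
{
	wait_event(mad_wait, completions_pending);
	completions_pending = 0;
	/* ... reap and dispatch completions ... */
}

/* CQ event handler: record the work and wake the thread */
static void mad_completion_event(void)
{
	completions_pending = 1;
	wake_up(&mad_wait);
}

wait_event() re-checks the condition before sleeping and after every
wakeup, so a wakeup that arrives before the thread sleeps is not lost.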