[openib-general] [PATCH] ib_mad: Fix list handling

Hal Rosenstock halr at voltaire.com
Sun Sep 12 10:05:24 PDT 2004


Fix list handling (hopefully :-)

Index: ib_mad.c
===================================================================
--- ib_mad.c	(revision 791)
+++ ib_mad.c	(working copy)
@@ -106,10 +106,7 @@
 				ib_mad_recv_handler recv_handler,
 				void *context)
 {
-	struct ib_mad_port_private *entry, *priv = NULL,
-				     *head = (struct ib_mad_port_private *) &ib_mad_port_list;
-	struct ib_mad_agent_private *entry2,
-				    *head2 =  (struct ib_mad_agent_private *)&ib_mad_agent_list;
+	struct ib_mad_port_private *entry, *priv = NULL;
 	struct ib_mad_agent *mad_agent, *ret;
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_reg_req *reg_req = NULL;
@@ -148,7 +145,7 @@
 				goto error1;
 			}
 		} else if (mad_reg_req->mgmt_class == 0) {
-			/* class 0 is reserved and used for aliasing IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */
+			/* Class 0 is reserved and used for aliasing IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */
 			ret = ERR_PTR(-EINVAL);
 			goto error1;
 		}
@@ -156,7 +153,7 @@
 
 	/* Validate device and port */
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
 			break;
@@ -222,7 +219,7 @@
 
 	/* Add to mad agent list */
 	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
-	list_add_tail((struct list_head *) mad_agent_priv, &ib_mad_agent_list);
+	list_add_tail(&mad_agent_priv->agent_list, &ib_mad_agent_list);
 	spin_unlock_irqrestore(&ib_mad_agent_list_lock, flags);
 
 	ret2 = add_mad_reg_req(mad_reg_req, mad_agent_priv);
@@ -237,13 +234,10 @@
 error3:
 	/* Remove from mad agent list */
 	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
-	list_for_each(entry2, head2) {
-		if (entry2->agent == mad_agent_priv->agent) {
-			list_del((struct list_head *)entry2);
-			break;
-		}
-	}	
+	list_del(&mad_agent_priv->agent_list);
 	spin_unlock_irqrestore(&ib_mad_agent_list_lock, flags);
+
+	/* Release allocated structures */
 	kfree(reg_req);
 error2:
 	kfree(mad_agent);
@@ -258,15 +252,15 @@
  */
 int ib_mad_dereg(struct ib_mad_agent *mad_agent)
 {
-	struct ib_mad_agent_private *entry,
-				    *head = (struct ib_mad_agent_private *)&ib_mad_agent_list;
+	struct ib_mad_agent_private *entry, *temp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_mad_agent_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry_safe(entry, temp, &ib_mad_agent_list, agent_list) {
 		if (entry->agent == mad_agent) {
 			remove_mad_reg_req(entry);
-			list_del((struct list_head *)entry);
+			list_del(&entry->agent_list);
+
 			/* Release allocated structures */
 			kfree(entry->reg_req);
 			kfree(entry->agent);
@@ -316,8 +310,7 @@
 			printk(KERN_ERR "No memory for ib_mad_send_wr_private\n");
 			return -ENOMEM;	
 		}
-		/* Initialized MAD send WR tracking structure */
-		mad_send_wr->next = NULL;
+		/* Initialize MAD send WR tracking structure */
 		mad_send_wr->agent = mad_agent;
 		mad_send_wr->wr_id = cur_send_wr->wr_id;
 		mad_send_wr->timeout_ms = cur_send_wr->wr.ud.timeout_ms;
@@ -335,7 +328,7 @@
 
 		/* Link send WR into posted send MAD list */
 		spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
-		list_add_tail((struct list_head *)mad_send_wr,
+		list_add_tail(&mad_send_wr->send_list,
 			      &((struct ib_mad_port_private *)mad_agent->device->mad)->send_posted_mad_list);
 		((struct ib_mad_port_private *)mad_agent->device->mad)->send_posted_mad_count++;
 		spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
@@ -344,7 +337,7 @@
 		if (ret) {
 			/* Unlink from posted send MAD list */
 			spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
-			list_del((struct list_head *)send_wr);
+			list_del(&mad_send_wr->send_list);
 			((struct ib_mad_port_private *)mad_agent->device->mad)->send_posted_mad_count--;
 			spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, flags);
 			*bad_send_wr = cur_send_wr;
@@ -566,8 +559,7 @@
 				     struct ib_wc *wc)
 {
 	struct ib_mad_recv_wc recv_wc;
-	struct ib_mad_private_header *entry,
-				     *head = (struct ib_mad_private_header *)&priv->recv_posted_mad_list;
+	struct ib_mad_private_header *entry, *temp;
 	struct ib_mad_private *recv = NULL;
 	unsigned long flags;
 	u32 qp_num;
@@ -577,11 +569,13 @@
 
 	/* Find entry on posted MAD receive list which corresponds to this completion */
 	spin_lock_irqsave(&priv->recv_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry_safe(entry, temp,
+				 &priv->recv_posted_mad_list[convert_qpnum(qp_num)],
+				 mad_list) {
 		if ((unsigned long)entry == wc->wr_id) {
 			recv = (struct ib_mad_private *)entry;	
 			/* Remove from posted receive MAD list */
-			list_del((struct list_head *)entry);
+			list_del(&entry->mad_list);
 			priv->recv_posted_mad_count[convert_qpnum(qp_num)]--;
 			break;
 		}
@@ -620,17 +614,17 @@
 static void ib_mad_send_done_handler(struct ib_mad_port_private *priv,
 				     struct ib_wc *wc)
 {
-	struct ib_mad_send_wr_private *entry, *send_wr = NULL,
-				      *head = (struct ib_mad_send_wr_private *)&priv->send_posted_mad_list;
+	struct ib_mad_send_wr_private *entry, *temp, *send_wr = NULL;
 	unsigned long flags;
 
 	/* Find entry on posted MAD send list which corresponds to this completion */
 	spin_lock_irqsave(&priv->send_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry_safe(entry, temp,
+				 &priv->send_posted_mad_list, send_list) {
 		if (entry->wr_id == wc->wr_id) {
 			send_wr = entry;
 			/* Remove from posted send MAD list */
-			list_del((struct list_head *)entry);
+			list_del(&entry->send_list);
 			priv->send_posted_mad_count--;	
 			break;
 		}
@@ -782,7 +776,6 @@
 		printk(KERN_ERR "No memory for receive MAD\n");
 		return -ENOMEM;
 	}
-	mad_priv->header.next = NULL;
 
 	/* Setup scatter list */
 	sg_list.addr = pci_map_single(priv->device->dma_device,
@@ -801,7 +794,7 @@
 
 	/* Link receive WR into posted receive MAD list */
 	spin_lock_irqsave(&priv->recv_list_lock, flags);
-	list_add_tail((struct list_head *)mad_priv, &priv->recv_posted_mad_list[convert_qpnum(qp->qp_num)]);
+	list_add_tail(&mad_priv->header.mad_list, &priv->recv_posted_mad_list[convert_qpnum(qp->qp_num)]);
 	priv->recv_posted_mad_count[convert_qpnum(qp->qp_num)]++;
 	spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 
@@ -811,7 +804,7 @@
 	if (ib_post_recv(qp, &recv_wr, &bad_recv_wr)) {
 		/* Unlink from posted receive MAD list */
 		spin_lock_irqsave(&priv->recv_list_lock, flags);
-		list_del((struct list_head *)mad_priv);
+		list_del(&mad_priv->header.mad_list);
 		priv->recv_posted_mad_count[convert_qpnum(qp->qp_num)]--;
 		spin_unlock_irqrestore(&priv->recv_list_lock, flags);
 
@@ -877,7 +870,7 @@
 
 	spin_lock_irqsave(&priv->send_list_lock, flags);
 	while (!list_empty(&priv->send_posted_mad_list)) {
-		list_del(priv->send_posted_mad_list.next);
+		list_del(&priv->send_posted_mad_list);
 		/* Call completion handler with some status ? */
 	}
 	INIT_LIST_HEAD(&priv->send_posted_mad_list);
@@ -1098,13 +1091,12 @@
 	};
 	struct ib_qp_init_attr qp_init_attr;
 	struct ib_qp_cap qp_cap;
-	struct ib_mad_port_private *entry, *priv = NULL,
-				     *head = (struct ib_mad_port_private *) &ib_mad_port_list;
+	struct ib_mad_port_private *entry, *priv = NULL;
 	unsigned long flags;
 
 	/* First, check if port already open at MAD layer */
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
 			break;
@@ -1201,7 +1193,7 @@
 	}
 
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
-	list_add_tail((struct list_head *)priv, &ib_mad_port_list);
+	list_add_tail(&priv->port_list, &ib_mad_port_list);
 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 
 	return 0;
@@ -1229,12 +1221,11 @@
  */
 static int ib_mad_port_close(struct ib_device *device, int port)
 {
-	struct ib_mad_port_private *entry, *priv = NULL,
-				     *head = (struct ib_mad_port_private *)&ib_mad_port_list;
+	struct ib_mad_port_private *entry, *priv = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
-	list_for_each(entry, head) {
+	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
 		if (entry->device == device && entry->port == port) {
 			priv = entry;
 			break;
@@ -1247,7 +1238,7 @@
 		return -ENODEV;
 	}
 
-	list_del((struct list_head *)priv);
+	list_del(&priv->port_list);
 	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
 
 	ib_mad_port_stop(priv);
@@ -1257,7 +1248,7 @@
 	ib_dereg_mr(priv->mr);
 	ib_dealloc_pd(priv->pd);
 	ib_destroy_cq(priv->cq);
-	/* Handle MAD registration tables!!! */
+	/* Handle deallocation of MAD registration tables!!! */
 	kfree(priv);
 	device->mad = NULL;
 
Index: ib_mad_priv.h
===================================================================
--- ib_mad_priv.h	(revision 791)
+++ ib_mad_priv.h	(working copy)
@@ -81,7 +81,7 @@
 };
 
 struct ib_mad_private_header {
-	struct ib_mad_private_header *next;
+	struct list_head mad_list;
 	struct ib_mad_buf buf;
 } __attribute__ ((packed));
 
@@ -95,14 +95,14 @@
 } __attribute__ ((packed));
 
 struct ib_mad_agent_private {
-	struct ib_mad_agent_private *next;
+	struct list_head agent_list;
 	struct ib_mad_agent *agent;
 	struct ib_mad_reg_req *reg_req;
 	u8 rmpp_version;
 };
 
 struct ib_mad_send_wr_private {
-	struct ib_mad_send_wr_private *next;
+	struct list_head send_list;
 	struct ib_mad_agent *agent;
 	u64 wr_id;
 	int timeout_ms;
@@ -122,7 +122,7 @@
 };
 
 struct ib_mad_port_private {
-	struct ib_mad_port_private *next;
+	struct list_head port_list;
 	struct ib_device *device;
 	int port;
 	struct ib_qp *qp[IB_MAD_QPS_SUPPORTED];
Index: TODO
===================================================================
--- TODO	(revision 791)
+++ TODO	(working copy)
@@ -3,7 +3,6 @@
 OpenIB MAD Layer
 
 Short Term
-Fix list handling
 Client ID table
 Use wait queue and wait_event rather than signals and semaphores
 Finish receive path coding





More information about the general mailing list