[ewg] Re: Continue of "defer skb_orphan() until irqs enabled"

akepner at sgi.com
Mon Oct 6 09:32:54 PDT 2008


Sorry for the delay in getting back to you on this, but we've
done our testing and haven't found any problems. As I mentioned,
we're using OFED 1.3.1, so the patch had to be tweaked a bit.
In essence, the patch drops NETIF_F_LLTX and the driver-private
tx_lock in favor of the network stack's own TX lock; since
priv->lock can then no longer assume interrupts are already
disabled, it switches to the irqsave/irqrestore variants. A
short sketch of the locking change follows, then the full patch
we used.
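
Roughly, the conversion replaces the old idiom with the new one
below (an illustrative sketch only, not part of the patch; the
priv/dev names match the driver, the elided bodies do not):

	/* Before: driver-private TX lock (LLTX), with priv->lock
	 * nested inside it while interrupts are already disabled. */
	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	/* ... modify TX state ... */
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	/* After: the core stack's TX lock -- netif_tx_lock() where
	 * BHs are already disabled (e.g. the NAPI poll path), and
	 * netif_tx_lock_bh() from process context -- with priv->lock
	 * now managing interrupt state itself. */
	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);
	/* ... modify TX state ... */
	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);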


--- a/drivers/infiniband/ulp/ipoib/ipoib.h	2008-09-09 15:53:24.856316458 -0700
+++ e/drivers/infiniband/ulp/ipoib/ipoib.h	2008-09-29 10:19:00.833519991 -0700
@@ -345,10 +345,9 @@ struct ipoib_ethtool_st {
 };
 
 /*
- * Device private locking: tx_lock protects members used in TX fast
- * path (and we use LLTX so upper layers don't do extra locking).
- * lock protects everything else.  lock nests inside of tx_lock (ie
- * tx_lock must be acquired first if needed).
+ * Device private locking: network stack tx_lock protects members used
+ * in TX fast path, lock protects everything else.  lock nests inside
+ * of tx_lock (ie tx_lock must be acquired first if needed).
  */
 struct ipoib_dev_priv {
 	spinlock_t lock;
@@ -397,7 +396,6 @@ struct ipoib_dev_priv {
 	struct ipoib_vmap	rx_vmap_ring;
 	struct ipoib_sg_rx_buf *rx_ring;
 
-	spinlock_t           tx_lock;
 	struct ipoib_vmap    tx_vmap_ring;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned             tx_head;
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-09-09 15:53:24.856316458 -0700
+++ e/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-09-26 13:38:00.066208156 -0700
@@ -776,7 +776,8 @@ void ipoib_cm_handle_tx_wc(struct net_de
 
 	dev_kfree_skb_any(tx_req->skb);
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
+
 	++tx->tx_tail;
 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
@@ -791,7 +792,7 @@ void ipoib_cm_handle_tx_wc(struct net_de
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
 
-		spin_lock(&priv->lock);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -811,10 +812,10 @@ void ipoib_cm_handle_tx_wc(struct net_de
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
 
-		spin_unlock(&priv->lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 }
 
 int ipoib_cm_dev_open(struct net_device *dev)
@@ -1134,7 +1135,6 @@ static void ipoib_cm_tx_destroy(struct i
 {
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	struct ipoib_cm_tx_buf *tx_req;
-	unsigned long flags;
 	unsigned long begin;
 
 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1165,12 +1165,12 @@ timeout:
 				    DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
-		spin_lock_irqsave(&priv->tx_lock, flags);
+		netif_tx_lock_bh(p->dev);
 		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		netif_tx_unlock_bh(p->dev);
 	}
 
 	if (p->qp)
@@ -1187,6 +1187,7 @@ static int ipoib_cm_tx_handler(struct ib
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
 	struct net_device *dev = priv->dev;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 	int ret;
 
 	switch (event->event) {
@@ -1205,8 +1206,8 @@ static int ipoib_cm_tx_handler(struct ib
 	case IB_CM_REJ_RECEIVED:
 	case IB_CM_TIMEWAIT_EXIT:
 		ipoib_dbg(priv, "CM error %d.\n", event->event);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 		neigh = tx->neigh;
 
 		if (neigh) {
@@ -1224,8 +1225,8 @@ static int ipoib_cm_tx_handler(struct ib
 			queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		}
 
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		break;
 	default:
 		break;
@@ -1279,19 +1280,24 @@ static void ipoib_cm_tx_start(struct wor
 	struct ib_sa_path_rec pathrec;
 	u32 qpn;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while (!list_empty(&priv->cm.start_list)) {
 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
 		list_del_init(&p->list);
 		neigh = p->neigh;
 		qpn = IPOIB_QPN(neigh->neighbour->ha);
 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
-		spin_unlock(&priv->lock);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
+
 		if (ret) {
 			neigh = p->neigh;
 			if (neigh) {
@@ -1305,44 +1311,52 @@ static void ipoib_cm_tx_start(struct wor
 			kfree(p);
 		}
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_tx_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.reap_task);
+	struct net_device *dev = priv->dev;
 	struct ipoib_cm_tx *p;
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	while (!list_empty(&priv->cm.reap_list)) {
 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
 		list_del(&p->list);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		ipoib_cm_tx_destroy(p);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
 						   cm.skb_task);
+	struct net_device *dev = priv->dev;
 	struct sk_buff *skb;
-
+	unsigned long flags;
 	unsigned mtu = priv->mcast_mtu;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
+
 	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
+
 		if (skb->protocol == htons(ETH_P_IP))
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1350,11 +1364,13 @@ static void ipoib_cm_skb_reap(struct wor
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
 #endif
 		dev_kfree_skb_any(skb);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-09-09 15:53:24.856316458 -0700
+++ e/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-09-29 13:21:15.017397128 -0700
@@ -457,10 +457,9 @@ static void ipoib_ib_tx_timer_func(unsig
 {
 	struct net_device *dev = (struct net_device *)dev_ptr;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
 	unsigned int wrid;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock(dev);
 	if (((int)priv->tx_tail - (int)priv->tx_head < 0) &&
 		time_after(jiffies, dev->trans_start + 10) &&
 		priv->tx_outstanding < ipoib_sendq_size &&
@@ -479,17 +478,16 @@ static void ipoib_ib_tx_timer_func(unsig
 		}
 	}
 	poll_tx(priv);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock(dev);
 
 	mod_timer(&priv->poll_timer, jiffies + HZ / 2);
 }
 
 static void flush_tx_queue(struct ipoib_dev_priv *priv)
 {
-	unsigned long flags;
 	unsigned int wrid;
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock_bh(priv->dev);
 	wrid = priv->tx_head & (ipoib_sendq_size - 1);
 	priv->tx_ring[wrid].skb = NULL;
 	if (!post_zlen_send_wr(priv, wrid)) {
@@ -499,7 +497,7 @@ static void flush_tx_queue(struct ipoib_
 		ipoib_warn(priv, "post_zlen failed\n");
 
 	poll_tx(priv);
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock_bh(priv->dev);
 }
 
 static inline int post_send(struct ipoib_dev_priv *priv,
@@ -645,17 +643,20 @@ static void __ipoib_reap_ah(struct net_d
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_ah *ah, *tah;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
+
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
 			ib_destroy_ah(ah->ah);
 			kfree(ah);
 		}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 void ipoib_reap_ah(struct work_struct *work)
@@ -789,6 +790,14 @@ void ipoib_drain_cq(struct net_device *d
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i, n;
+
+	/*
+	 * We call completion handling routines that expect to be
+	 * called from the BH-disabled NAPI poll context, so disable
+	 * BHs here too.
+	 */
+	local_bh_disable();
+
 	do {
 		n = ib_poll_cq(priv->rcq, IPOIB_NUM_WC, priv->ibwc);
 		for (i = 0; i < n; ++i) {
@@ -813,6 +822,8 @@ void ipoib_drain_cq(struct net_device *d
 			}
 		}
 	} while (n == IPOIB_NUM_WC);
+
+	local_bh_enable();
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
@@ -821,7 +832,6 @@ int ipoib_ib_dev_stop(struct net_device 
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	int i;
-	unsigned long flags;
 	int timer_works;
 
 	timer_works = test_and_clear_bit(IPOIB_FLAG_TIME_ON, &priv->flags);
@@ -872,9 +882,9 @@ int ipoib_ib_dev_stop(struct net_device 
 		}
 
 		if ((int) priv->tx_tail - (int) priv->tx_head < 0) {
-			spin_lock_irqsave(&priv->tx_lock, flags);
+			netif_tx_lock_bh(dev);
 			poll_tx(priv);
-			spin_unlock_irqrestore(&priv->tx_lock, flags);
+			netif_tx_unlock_bh(dev);
 		}
 
 		ipoib_drain_cq(dev);
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-09-09 15:53:24.856316458 -0700
+++ e/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-09-29 10:27:00.046616538 -0700
@@ -357,9 +357,10 @@ void ipoib_flush_paths(struct net_device
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
+	unsigned long flags;
 
-	spin_lock_irq(&priv->tx_lock);
-	spin_lock(&priv->lock);
+	netif_tx_lock_bh(dev);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	list_splice(&priv->path_list, &remove_list);
 	INIT_LIST_HEAD(&priv->path_list);
@@ -370,15 +371,16 @@ void ipoib_flush_paths(struct net_device
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock(&priv->lock);
-		spin_unlock_irq(&priv->tx_lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		netif_tx_unlock_bh(dev);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->tx_lock);
-		spin_lock(&priv->lock);
+		netif_tx_lock_bh(dev);
+		spin_lock_irqsave(&priv->lock, flags);
 	}
-	spin_unlock(&priv->lock);
-	spin_unlock_irq(&priv->tx_lock);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	netif_tx_unlock_bh(dev);
 }
 
 static void path_rec_completion(int status,
@@ -547,6 +549,7 @@ static void neigh_add_path(struct sk_buf
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
+	unsigned long flags;
 
 	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
 	if (!neigh) {
@@ -555,11 +558,7 @@ static void neigh_add_path(struct sk_buf
 		return;
 	}
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, skb->dst->neighbour->ha + 4);
 	if (!path) {
@@ -606,7 +605,7 @@ static void neigh_add_path(struct sk_buf
 		__skb_queue_tail(&neigh->queue, skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return;
 
 err_list:
@@ -618,7 +617,7 @@ err_drop:
 	++priv->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
@@ -642,12 +641,9 @@ static void unicast_arp_send(struct sk_b
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path) {
@@ -658,7 +654,7 @@ static void unicast_arp_send(struct sk_b
 			__skb_queue_tail(&path->queue, skb);
 
 			if (path_rec_start(dev, path)) {
-				spin_unlock(&priv->lock);
+				spin_unlock_irqrestore(&priv->lock, flags);
 				path_free(dev, path);
 				return;
 			} else
@@ -668,7 +664,7 @@ static void unicast_arp_send(struct sk_b
 			dev_kfree_skb_any(skb);
 		}
 
-		spin_unlock(&priv->lock);
+		spin_unlock_irqrestore(&priv->lock, flags);
 		return;
 	}
 
@@ -687,7 +683,7 @@ static void unicast_arp_send(struct sk_b
 		dev_kfree_skb_any(skb);
 	}
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -696,23 +692,10 @@ static int ipoib_start_xmit(struct sk_bu
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
-		return NETDEV_TX_LOCKED;
-
-	/*
-	 * Check if our queue is stopped.  Since we have the LLTX bit
-	 * set, we can't rely on netif_stop_queue() preventing our
-	 * xmit function from being called with a full queue.
-	 */
-	if (unlikely(netif_queue_stopped(dev))) {
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
 	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -722,7 +705,7 @@ static int ipoib_start_xmit(struct sk_bu
 					    skb->dst->neighbour->ha + 4,
 					    sizeof(union ib_gid))) ||
 					 (neigh->dev != dev))) {
-				spin_lock(&priv->lock);
+				spin_lock_irqsave(&priv->lock, flags);
 				/*
 				 * It's safe to call ipoib_put_ah() inside
 				 * priv->lock here, because we know that
@@ -733,26 +716,26 @@ static int ipoib_start_xmit(struct sk_bu
 				ipoib_put_ah(neigh->ah);
 				list_del(&neigh->list);
 				ipoib_neigh_free(dev, neigh);
-				spin_unlock(&priv->lock);
+				spin_unlock_irqrestore(&priv->lock, flags);
 				ipoib_path_lookup(skb, dev);
-				goto out;
+				return NETDEV_TX_OK;
 			}
 
 		if (ipoib_cm_get(neigh)) {
 			if (ipoib_cm_up(neigh)) {
 				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
-				goto out;
+				return NETDEV_TX_OK;
 			}
 		} else if (neigh->ah) {
 			ipoib_send(dev, skb, neigh->ah,
 				   IPOIB_QPN(skb->dst->neighbour->ha));
-			goto out;
+			return NETDEV_TX_OK;
 		}
 
 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-			spin_lock(&priv->lock);
+			spin_lock_irqsave(&priv->lock, flags);
 			__skb_queue_tail(&neigh->queue, skb);
-			spin_unlock(&priv->lock);
+			spin_unlock_irqrestore(&priv->lock, flags);
 		} else {
 			++priv->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
@@ -781,16 +764,13 @@ static int ipoib_start_xmit(struct sk_bu
 					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
 				dev_kfree_skb_any(skb);
 				++priv->stats.tx_dropped;
-				goto out;
+				return NETDEV_TX_OK;
 			}
 
 			unicast_arp_send(skb, dev, phdr);
 		}
 	}
 
-out:
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
@@ -1088,7 +1068,7 @@ static void ipoib_setup(struct net_devic
 	dev->addr_len 		 = INFINIBAND_ALEN;
 	dev->type 		 = ARPHRD_INFINIBAND;
 	dev->tx_queue_len 	 = ipoib_sendq_size * 2;
-	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
+	dev->features            = NETIF_F_VLAN_CHALLENGED;
 
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
 
@@ -1097,7 +1077,6 @@ static void ipoib_setup(struct net_devic
 	priv->dev = dev;
 
 	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->tx_lock);
 
 	mutex_init(&priv->mcast_mutex);
 	mutex_init(&priv->vlan_mutex);
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-09-09 15:53:24.860316701 -0700
+++ e/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-09-29 13:34:30.137656855 -0700
@@ -71,14 +71,13 @@ static void ipoib_mcast_free(struct ipoi
 	struct net_device *dev = mcast->dev;
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
-	unsigned long flags;
 	int tx_dropped = 0;
 
 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
 			IPOIB_GID_ARG(mcast->mcmember.mgid));
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irq(&priv->lock);
 
 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
 		/*
@@ -92,7 +91,7 @@ static void ipoib_mcast_free(struct ipoi
 		ipoib_neigh_free(dev, neigh);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irq(&priv->lock);
 
 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
@@ -102,9 +101,9 @@ static void ipoib_mcast_free(struct ipoi
 		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 	}
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock_bh(dev);
 	priv->stats.tx_dropped += tx_dropped;
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock_bh(dev);
 
 	kfree(mcast);
 }
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struc
 	}
 
 	/* actually send any queued packets */
-	spin_lock_irq(&priv->tx_lock);
+	netif_tx_lock_bh(dev);
 	while (!skb_queue_empty(&mcast->pkt_queue)) {
 		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-		spin_unlock_irq(&priv->tx_lock);
+		netif_tx_unlock_bh(dev);
 
 		skb->dev = dev;
 
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struc
 
 		if (dev_queue_xmit(skb))
 			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
-		spin_lock_irq(&priv->tx_lock);
+		netif_tx_lock_bh(dev);
 	}
-	spin_unlock_irq(&priv->tx_lock);
+	netif_tx_unlock_bh(dev);
 
 	return 0;
 }
@@ -302,12 +301,12 @@ ipoib_mcast_sendonly_join_complete(int s
 					IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 		/* Flush out any queued packets */
-		spin_lock_irq(&priv->tx_lock);
+		netif_tx_lock_bh(dev);
 		while (!skb_queue_empty(&mcast->pkt_queue)) {
 			++priv->stats.tx_dropped;
 			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
 		}
-		spin_unlock_irq(&priv->tx_lock);
+		netif_tx_unlock_bh(dev);
 
 		/* Clear the busy flag so we try again */
 		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -520,18 +519,16 @@ static int create_own_ah(struct ipoib_de
 
 void destroy_own_ah(struct ipoib_dev_priv *priv)
 {
-	unsigned long flags;
-
 	if (!priv->own_ah) {
 		ipoib_dbg(priv, "own ah already destroyed\n");
 		return;
 	} else
 		ipoib_dbg(priv, "destroying own ah\n");
 
-	spin_lock_irqsave(&priv->tx_lock, flags);
+	netif_tx_lock_bh(priv->dev);
 	ib_destroy_ah(priv->own_ah);
 	priv->own_ah = NULL;
-	spin_unlock_irqrestore(&priv->tx_lock, flags);
+	netif_tx_unlock_bh(priv->dev);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -687,12 +684,9 @@ void ipoib_mcast_send(struct net_device 
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
+	unsigned long flags;
 
-	/*
-	 * We can only be called from ipoib_start_xmit, so we're
-	 * inside tx_lock -- no need to save/restore flags.
-	 */
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
 	    !priv->broadcast					||
@@ -763,7 +757,7 @@ out:
 	}
 
 unlock:
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 void ipoib_mcast_dev_flush(struct net_device *dev)
