[ofa-general] [PATCH] IPoIB-UD S/G 4K MTU patch against OFED-1.3 RC2

Shirley Ma mashirle at us.ibm.com
Wed Jan 30 22:19:05 PST 2008


Hello Vlad,

	This is a patch built against OFED-1.3 RC2 on top of Pradeep's noSRQ
patch. I am not sure whether it applies cleanly to your current
OFED-1.3 tree; if not, please let me know.

	This patch is in our test bed. It has been tested with IPoIB-CM SRQ,
IPoIB-UD 2K MTU, IPoIB-UD 4K MTU, and IPoIB-CM noSRQ on two nodes.
Cluster testing is ongoing.

Thanks
Shirley


This patch makes the IPoIB-CM S/G RX path more generic, sharing the RX
buffer allocation, DMA unmap, and skb fragment-trimming helpers with the
UD path, and enables IPoIB-UD S/G 4K MTU support.
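
For reviewers, a quick standalone sketch (illustrative only, not part of
the patch) of the RX buffer geometry that the IPOIB_UD_* macros below
compute. It reimplements the macros in userspace and assumes 4K pages;
the skb head holds the GRH, the IPoIB header, and the alignment padding,
and any remainder is chained on as full-page fragments:

/* Illustrative only -- mirrors the IPOIB_UD_* macros from the patch. */
#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed page size */
#define IB_GRH_BYTES	40	/* GRH the HCA deposits before the payload */
#define IPOIB_ENCAP_LEN	4	/* IPoIB encapsulation header */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define IPOIB_UD_MTU(ib_mtu)		((ib_mtu) - IPOIB_ENCAP_LEN)
#define IPOIB_UD_BUF_SIZE(ib_mtu)	((ib_mtu) + IB_GRH_BYTES + 4)
#define IPOIB_UD_HEAD_SIZE(ib_mtu)	(IPOIB_UD_BUF_SIZE(ib_mtu) % PAGE_SIZE)
#define IPOIB_UD_RX_SG(ib_mtu)		(ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu), PAGE_SIZE) / PAGE_SIZE)

int main(void)
{
	int mtus[] = { 2048, 4096 }, i;

	/* print buffer, head, and SGE count for the two IB MTUs of interest */
	for (i = 0; i < 2; i++)
		printf("ib_mtu %4d: net mtu %4d, buf %4d, head %4d, %d sge\n",
		       mtus[i], IPOIB_UD_MTU(mtus[i]),
		       IPOIB_UD_BUF_SIZE(mtus[i]),
		       IPOIB_UD_HEAD_SIZE(mtus[i]),
		       IPOIB_UD_RX_SG(mtus[i]));
	return 0;
}

With 4K pages this prints a single 2092-byte SGE for a 2K IB MTU, and a
44-byte head plus one full page (2 SGEs) for a 4K IB MTU, which is why
max_recv_sge and the rx_sge array are sized per device below.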

Signed-off-by: Shirley Ma <xma at us.ibm.com>
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-01-27 14:20:17.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_cm.c	2008-01-27 14:26:18.000000000 -0600
@@ -77,17 +77,6 @@ static struct ib_send_wr ipoib_cm_rx_dra
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			       struct ib_cm_event *event);
 
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
-				  u64 mapping[IPOIB_CM_RX_SG])
-{
-	int i;
-
-	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
-	for (i = 0; i < frags; ++i)
-		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
-}
-
 static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -102,8 +91,9 @@ static int ipoib_cm_post_receive_srq(str
 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
-				      priv->cm.srq_ring[id].mapping);
+		ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+				   IPOIB_CM_HEAD_SIZE,
+				   priv->cm.srq_ring[id].mapping);
 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
 		priv->cm.srq_ring[id].skb = NULL;
 	}
@@ -126,8 +116,9 @@ static int ipoib_cm_post_receive_nonsrq(
 	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
-		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-				      rx->rx_ring[id].mapping);
+		ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+				   IPOIB_CM_HEAD_SIZE,
+				   rx->rx_ring[id].mapping);
 		dev_kfree_skb_any(rx->rx_ring[id].skb);
 		rx->rx_ring[id].skb = NULL;
 	}
@@ -135,69 +126,16 @@ static int ipoib_cm_post_receive_nonsrq(
 	return ret;
 }
 
-static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
-					     struct ipoib_cm_rx_buf *rx_ring,
-					     int id, int frags,
-					     u64 mapping[IPOIB_CM_RX_SG])
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-	int i;
-
-	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
-	if (unlikely(!skb))
-		return NULL;
-
-	/*
-	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
-	 * IP header to a multiple of 16.
-	 */
-	skb_reserve(skb, 12);
-
-	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
-				       DMA_FROM_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-	for (i = 0; i < frags; i++) {
-		struct page *page = alloc_page(GFP_ATOMIC);
-
-		if (!page)
-			goto partial_error;
-		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
-
-		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
-						 0, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
-			goto partial_error;
-	}
-
-	rx_ring[id].skb = skb;
-	return skb;
-
-partial_error:
-
-	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
-	for (; i > 0; --i)
-		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
-
-	dev_kfree_skb_any(skb);
-	return NULL;
-}
-
 static void ipoib_cm_free_rx_ring(struct net_device *dev,
-				  struct ipoib_cm_rx_buf *rx_ring)
+				  struct ipoib_rx_buf *rx_ring)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i;
 
 	for (i = 0; i < ipoib_recvq_size; ++i)
 		if (rx_ring[i].skb) {
-			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-					      rx_ring[i].mapping);
+			ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+					   IPOIB_CM_HEAD_SIZE, rx_ring[i].mapping);
 			dev_kfree_skb_any(rx_ring[i].skb);
 		}
 
@@ -345,9 +283,11 @@ static int ipoib_cm_nonsrq_init_rx(struc
 	spin_unlock_irq(&priv->lock);
 
 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
-					   rx->rx_ring[i].mapping)) {
-			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+			rx->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+								IPOIB_CM_HEAD_SIZE, 12,
+								rx->rx_ring[i].mapping);
+			if (!rx->rx_ring[i].skb) {
+				ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 				ret = -ENOMEM;
 				goto err_count;
 			}
@@ -480,43 +420,11 @@ static int ipoib_cm_rx_handler(struct ib
 		return 0;
 	}
 }
-/* Adjust length of skb with fragments to match received data */
-static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-			  unsigned int length, struct sk_buff *toskb)
-{
-	int i, num_frags;
-	unsigned int size;
-
-	/* put header into skb */
-	size = min(length, hdr_space);
-	skb->tail += size;
-	skb->len += size;
-	length -= size;
-
-	num_frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < num_frags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		if (length == 0) {
-			/* don't need this page */
-			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
-			--skb_shinfo(skb)->nr_frags;
-		} else {
-			size = min(length, (unsigned) PAGE_SIZE);
-
-			frag->size = size;
-			skb->data_len += size;
-			skb->truesize += size;
-			skb->len += size;
-			length -= size;
-		}
-	}
-}
 
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_cm_rx_buf *rx_ring;
+	struct ipoib_rx_buf *rx_ring;
 	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
 	struct sk_buff *skb, *newskb;
 	struct ipoib_cm_rx *p;
@@ -582,7 +490,7 @@ void ipoib_cm_handle_rx_wc(struct net_de
 	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
 					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
 
-	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+	newskb = ipoib_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE, 12,
+				    mapping);
 	if (unlikely(!newskb)) {
 		/*
 		 * If we can't allocate a new RX buffer, dump
@@ -592,8 +500,9 @@ void ipoib_cm_handle_rx_wc(struct net_de
 		++dev->stats.rx_dropped;
 		goto repost;
 	}
+	rx_ring[wr_id].skb = newskb;
 
-	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
+	ipoib_dma_unmap_rx(priv, frags, IPOIB_CM_HEAD_SIZE,
+			   rx_ring[wr_id].mapping);
 	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
 
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
@@ -1508,9 +1417,10 @@ int ipoib_cm_dev_init(struct net_device 
 
 	if (ipoib_cm_has_srq(dev)) {
 		for (i = 0; i < ipoib_recvq_size; ++i) {
-			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
-						   priv->cm.num_frags - 1,
-						   priv->cm.srq_ring[i].mapping)) {
+			priv->cm.srq_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+							priv->cm.num_frags - 1,
+							IPOIB_CM_HEAD_SIZE, 12,
+							priv->cm.srq_ring[i].mapping);
+			if (!priv->cm.srq_ring[i].skb) {
 				ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 				ipoib_cm_dev_cleanup(dev);
 				return -ENOMEM;
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib.h ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib.h
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib.h	2008-01-27 14:20:17.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib.h	2008-01-27 14:23:34.000000000 -0600
@@ -56,10 +56,9 @@
 /* constants */
 
 enum {
-	IPOIB_PACKET_SIZE         = 2048,
-	IPOIB_BUF_SIZE 		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
 	IPOIB_ENCAP_LEN 	  = 4,
+	IPOIB_MAX_IB_MTU	  = 4096, /* max ib device payload is 4096 */
+	IPOIB_UD_MAX_RX_SG	  = ALIGN(IPOIB_MAX_IB_MTU + IB_GRH_BYTES + 4,
+					  PAGE_SIZE) / PAGE_SIZE, /* 4 bytes of padding to align IP header */
 
 	IPOIB_CM_MTU              = 0x10000 - 0x10, /* padding to align header to 16 */
 	IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
@@ -142,7 +141,7 @@ struct ipoib_mcast {
 
 struct ipoib_rx_buf {
 	struct sk_buff *skb;
-	u64		mapping;
+	u64		mapping[IPOIB_CM_RX_SG];
 };
 
 struct ipoib_tx_buf {
@@ -261,7 +260,7 @@ enum ipoib_cm_state {
 struct ipoib_cm_rx {
 	struct ib_cm_id     *id;
 	struct ib_qp        *qp;
-	struct ipoib_cm_rx_buf *rx_ring;
+	struct ipoib_rx_buf *rx_ring;
 	struct list_head     list;
 	struct net_device   *dev;
 	unsigned long        jiffies;
@@ -285,14 +284,9 @@ struct ipoib_cm_tx {
 	struct ib_wc         ibwc[IPOIB_NUM_WC];
 };
 
-struct ipoib_cm_rx_buf {
-	struct sk_buff *skb;
-	u64 mapping[IPOIB_CM_RX_SG];
-};
-
 struct ipoib_cm_dev_priv {
 	struct ib_srq  	       *srq;
-	struct ipoib_cm_rx_buf *srq_ring;
+	struct ipoib_rx_buf *srq_ring;
 	struct ib_cm_id        *id;
 	struct list_head        passive_ids;   /* state: LIVE */
 	struct list_head        rx_error_list; /* state: ERROR */
@@ -398,6 +392,9 @@ struct ipoib_dev_priv {
 	struct dentry *path_dentry;
 #endif
 	struct ipoib_ethtool_st etool;
+	unsigned int   max_ib_mtu;
+	struct ib_sge        rx_sge[IPOIB_UD_MAX_RX_SG];
+	struct ib_recv_wr    rx_wr;
 };
 
 struct ipoib_ah {
@@ -493,6 +490,14 @@ int ipoib_ib_dev_stop(struct net_device 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
+void ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, int head_size,
+			u64 *mapping);
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+		   unsigned int length, struct sk_buff *toskb);
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev,
+				   int id, int frags, int head_size,
+				   int pad, u64 *mapping);
+
 void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
@@ -542,6 +547,11 @@ void ipoib_drain_cq(struct net_device *d
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
 
+#define IPOIB_UD_MTU(ib_mtu)		((ib_mtu) - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu)	((ib_mtu) + IB_GRH_BYTES + 4) /* pad to align IP header */
+#define IPOIB_UD_HEAD_SIZE(ib_mtu)	(IPOIB_UD_BUF_SIZE(ib_mtu) % PAGE_SIZE)
+#define IPOIB_UD_RX_SG(ib_mtu)		(ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu), PAGE_SIZE) / PAGE_SIZE)
+
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
 #define IPOIB_FLAGS_RC          0x80
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-01-27 14:20:17.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-01-27 14:26:05.000000000 -0600
@@ -89,63 +89,118 @@ void ipoib_free_ah(struct kref *kref)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static int ipoib_ib_post_receive(struct net_device *dev, int id)
+/* Adjust length of skb with fragments to match received data */
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+		   unsigned int length, struct sk_buff *toskb)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ib_sge list;
-	struct ib_recv_wr param;
-	struct ib_recv_wr *bad_wr;
-	int ret;
+	int i, num_frags;
+	unsigned int size;
 
-	list.addr     = priv->rx_ring[id].mapping;
-	list.length   = IPOIB_BUF_SIZE;
-	list.lkey     = priv->mr->lkey;
-
-	param.next    = NULL;
-	param.wr_id   = id | IPOIB_OP_RECV;
-	param.sg_list = &list;
-	param.num_sge = 1;
+	/* put header into skb */
+	size = min(length, hdr_space);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
 
-	ret = ib_post_recv(priv->qp, &param, &bad_wr);
-	if (unlikely(ret)) {
-		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_ring[id].skb);
-		priv->rx_ring[id].skb = NULL;
+	num_frags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		if (length == 0) {
+			/* don't need this page */
+			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
+			--skb_shinfo(skb)->nr_frags;
+		} else {
+			size = min(length, (unsigned) PAGE_SIZE);
+
+			frag->size = size;
+			skb->data_len += size;
+			skb->truesize += size;
+			skb->len += size;
+			length -= size;
+		}
 	}
+}
 
-	return ret;
+void ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, int head_size,
+			u64 *mapping)
+{
+	int i;
+
+	ib_dma_unmap_single(priv->ca, mapping[0], head_size,
+			    DMA_FROM_DEVICE);
+	for (i = 0; i < frags; i++)
+		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE,
+				    DMA_FROM_DEVICE);
 }
 
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id, int frags,
+				   int head_size, int pad, u64 *mapping)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
-	u64 addr;
+	int i;
 
-	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
-	if (!skb)
-		return -ENOMEM;
+	skb = dev_alloc_skb(head_size + pad);
+	if (unlikely(!skb))
+		return NULL;
 
 	/*
-	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
-	 * header.  So we need 4 more bytes to get to 48 and align the
+	 * IPoIB adds a 4 byte header; the caller passes the padding needed to align the
 	 * IP header to a multiple of 16.
 	 */
-	skb_reserve(skb, 4);
+	skb_reserve(skb, pad);
 
-	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+	mapping[0] = ib_dma_map_single(priv->ca, skb->data, head_size,
+				       DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
 		dev_kfree_skb_any(skb);
-		return -EIO;
+		return NULL;
 	}
 
-	priv->rx_ring[id].skb     = skb;
-	priv->rx_ring[id].mapping = addr;
+	for (i = 0; i < frags; i++) {
+		struct page *page = alloc_page(GFP_ATOMIC);
 
-	return 0;
+		if (!page)
+			goto partial_error;
+		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+
+		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
+						 0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
+			goto partial_error;
+	}
+
+	return skb;
+
+partial_error:
+
+	ib_dma_unmap_single(priv->ca, mapping[0], head_size, DMA_FROM_DEVICE);
+
+	for (; i > 0; --i)
+		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE,
+				    DMA_FROM_DEVICE);
+
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
+static int ipoib_ib_post_receive(struct net_device *dev, int id)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ib_recv_wr *bad_wr;
+	int ret, i;
+
+	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+	for (i = 0; i < IPOIB_UD_RX_SG(priv->max_ib_mtu); ++i)
+		priv->rx_sge[i].addr = priv->rx_ring[id].mapping[i];
+	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
+	if (unlikely(ret)) {
+		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+		ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+				   IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+				   priv->rx_ring[id].mapping);
+		dev_kfree_skb_any(priv->rx_ring[id].skb);
+		priv->rx_ring[id].skb = NULL;
+	}
+
+	return ret;
 }
 
 static int ipoib_ib_post_receives(struct net_device *dev)
@@ -153,13 +208,24 @@ static int ipoib_ib_post_receives(struct
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i;
 
+	for (i = 0; i < IPOIB_UD_RX_SG(priv->max_ib_mtu); ++i)
+		priv->rx_sge[i].lkey = priv->mr->lkey;
+	priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu);
+	for (i = 0; i < IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1; ++i)
+		priv->rx_sge[i + 1].length = PAGE_SIZE;
+	priv->rx_wr.num_sge = IPOIB_UD_RX_SG(priv->max_ib_mtu);
+	priv->rx_wr.next = NULL;
+	priv->rx_wr.sg_list = priv->rx_sge;
+
 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (ipoib_alloc_rx_skb(dev, i)) {
-			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+		priv->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+							  IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+							  IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4,
+							  priv->rx_ring[i].mapping);
+		if (!priv->rx_ring[i].skb)
 			return -ENOMEM;
-		}
 		if (ipoib_ib_post_receive(dev, i)) {
 			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
+			ipoib_dev_cleanup(dev);
 			return -EIO;
 		}
 	}
@@ -171,9 +237,10 @@ static void ipoib_ib_handle_rx_wc(struct
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *newskb;
+	u64 mapping[IPOIB_UD_MAX_RX_SG];
 	struct ipoib_header *header;
-	u64 addr;
+	int frags;
 
 	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
 		       wr_id, wc->status);
@@ -185,15 +252,15 @@ static void ipoib_ib_handle_rx_wc(struct
 	}
 
 	skb  = priv->rx_ring[wr_id].skb;
-	addr = priv->rx_ring[wr_id].mapping;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		if (wc->status != IB_WC_WR_FLUSH_ERR)
 			ipoib_warn(priv, "failed recv event "
 				   "(status=%d, wrid=%d vend_err %x)\n",
 				   wc->status, wr_id, wc->vendor_err);
-		ib_dma_unmap_single(priv->ca, addr,
-				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+				   IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 
+				   priv->rx_ring[wr_id].mapping);
 		dev_kfree_skb_any(skb);
 		priv->rx_ring[wr_id].skb = NULL;
 		return;
@@ -206,21 +273,28 @@ static void ipoib_ib_handle_rx_wc(struct
 	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
 		goto repost;
 
+	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+					      (unsigned)IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu))) / PAGE_SIZE;
+	newskb = ipoib_alloc_rx_skb(dev, wr_id, frags,
+				    IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4, mapping);
 	/*
 	 * If we can't allocate a new RX buffer, dump
 	 * this packet and reuse the old buffer.
 	 */
-	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-		++dev->stats.rx_dropped;
-		goto repost;
-	}
+	if (unlikely(!newskb)) {
+		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
+		++dev->stats.rx_dropped;
+		goto repost;
+	}
+	priv->rx_ring[wr_id].skb = newskb;
 
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
 		       wc->byte_len, wc->slid);
 
-	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+	ipoib_dma_unmap_rx(priv, frags, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+			   priv->rx_ring[wr_id].mapping);
+	memcpy(priv->rx_ring[wr_id].mapping, mapping,
+	       (frags + 1) * sizeof *mapping);
 
-	skb_put(skb, wc->byte_len);
+	skb_put_frags(skb, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), wc->byte_len,
+		      newskb);
 	skb_pull(skb, IB_GRH_BYTES);
 
 	header = (struct ipoib_header *)skb->data;
@@ -687,10 +761,10 @@ int ipoib_ib_dev_stop(struct net_device 
 				rx_req = &priv->rx_ring[i];
 				if (!rx_req->skb)
 					continue;
-				ib_dma_unmap_single(priv->ca,
-						    rx_req->mapping,
-						    IPOIB_BUF_SIZE,
-						    DMA_FROM_DEVICE);
+				ipoib_dma_unmap_rx(priv,
+						   IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+						   IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+						   priv->rx_ring[i].mapping);
 				dev_kfree_skb_any(rx_req->skb);
 				rx_req->skb = NULL;
 			}
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_main.c ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_main.c
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-01-27 14:20:17.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-01-27 14:23:34.000000000 -0600
@@ -196,7 +196,7 @@ static int ipoib_change_mtu(struct net_d
 		return 0;
 	}
 
-	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
 		return -EINVAL;
 
 	priv->admin_mtu = new_mtu;
@@ -1024,10 +1024,6 @@ static void ipoib_setup(struct net_devic
 		set_bit(IPOIB_FLAG_HW_CSUM, &priv->flags);
 	}
 
-	/* MTU will be reset when mcast join happens */
-	dev->mtu 		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-	priv->mcast_mtu 	 = priv->admin_mtu = dev->mtu;
-
 	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
 
 	netif_carrier_off(dev);
@@ -1182,6 +1178,7 @@ static struct net_device *ipoib_add_port
 					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
+	struct ib_port_attr attr;
 	int result = -ENOMEM;
 
 	priv = ipoib_intf_alloc(format);
@@ -1192,6 +1189,18 @@ static struct net_device *ipoib_add_port
 
 	priv->dev->features |= NETIF_F_HIGHDMA;
 
+	if (!ib_query_port(hca, port, &attr))
+		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+	else {
+		printk(KERN_WARNING "%s: ib_query_port failed for port %d\n",
+		       hca->name, port);
+		goto device_init_failed;
+	}
+
+	/* MTU will be reset when mcast join happens */
+	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
 	result = ib_query_pkey(hca, port, 0, &priv->pkey);
 	if (result) {
 		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-01-14 04:00:04.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-01-27 14:23:34.000000000 -0600
@@ -567,9 +567,7 @@ void ipoib_mcast_join_task(struct work_s
 		return;
 	}
 
-	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
-		IPOIB_ENCAP_LEN;
-
+	priv->mcast_mtu =
+		IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
 	if (!ipoib_cm_admin_enabled(dev))
 		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
 
diff -urpN ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
--- ofa_1_3_kernel-20080114-0200_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2008-01-27 14:20:17.000000000 -0600
+++ ofa_1_3_kernel-20080114-0200_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2008-01-27 14:23:34.000000000 -0600
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_
 			.max_send_wr  = ipoib_sendq_size,
 			.max_recv_wr  = ipoib_recvq_size,
 			.max_send_sge = dev->features & NETIF_F_SG ? MAX_SKB_FRAGS + 1 : 1,
-			.max_recv_sge = 1
+			.max_recv_sge = IPOIB_UD_RX_SG(priv->max_ib_mtu)
 		},
 		.sq_sig_type = IB_SIGNAL_ALL_WR,
 		.qp_type     = IB_QPT_UD,
@@ -212,6 +212,16 @@ int ipoib_transport_dev_init(struct net_
 	priv->tx_wr.sg_list 	= priv->tx_sge;
 	priv->tx_wr.send_flags 	= IB_SEND_SIGNALED;
 
+	priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu);
+	for (i = 0; i < IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1; ++i) {
+		priv->rx_sge[i].lkey = priv->mr->lkey;
+		priv->rx_sge[i + 1].length = PAGE_SIZE;
+	}
+	priv->rx_sge[i].lkey = priv->mr->lkey;
+	priv->rx_wr.num_sge = IPOIB_UD_RX_SG(priv->max_ib_mtu);
+	priv->rx_wr.next = NULL;
+	priv->rx_wr.sg_list = priv->rx_sge;
+
 	return 0;
 
 out_free_cq: