[ofa-general] [PATCH 1/3] ib/ipoib: Make IPoIB-CM RX S/G APIs more generic

Shirley Ma mashirle at us.ibm.com
Wed Jan 30 10:45:16 PST 2008


Please review the patch below while I am still testing it, so that I can
fold your comments into my testing right away.

Thanks
Shirley
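
A quick summary to make the review easier: ipoib_cm_alloc_rx_skb() and
skb_put_frags() move from ipoib_cm.c into ipoib_ib.c and take the head
size and pad as parameters, the CM-only unmap helper becomes
ipoib_dma_unmap_rx(), and struct ipoib_cm_rx_buf is folded into struct
ipoib_rx_buf, so the UD receive path can share the same S/G buffer
handling later in this series. Below is only a rough sketch of the
intended call pattern; the CM-side values come from this patch, and the
UD pad of 4 is just the case mentioned in the allocator comment, not
final code.

	/* Connected mode: 12 bytes of headroom so the IP header ends up
	 * 16 byte aligned behind the 4 byte IPoIB header.
	 */
	skb = ipoib_cm_alloc_rx_skb(dev, id, IPOIB_CM_RX_SG - 1,
				    IPOIB_CM_HEAD_SIZE, 12,
				    rx_ring[id].mapping);
	if (!skb)
		return -ENOMEM;

	/* Matching generic unmap on error paths and ring teardown. */
	ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, IPOIB_CM_HEAD_SIZE,
			   rx_ring[id].mapping);

	/* A later patch would call the same helpers from the UD path,
	 * passing its own head size and a pad of 4 (illustration only).
	 */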

Signed-off-by: Shirley Ma <xma at us.ibm.com>
---

 drivers/infiniband/ulp/ipoib/ipoib.h    |   25 ++++--
 drivers/infiniband/ulp/ipoib/ipoib_cm.c |  139 ++++------------------------
 drivers/infiniband/ulp/ipoib/ipoib_ib.c |   85 +++++++++++++++++++
 3 files changed, 131 insertions(+), 118 deletions(-)
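
For reference, the receive completion path keeps calling skb_put_frags()
exactly as it does today; only the definition moves. Roughly (existing
caller quoted from memory, not changed by this patch):

	/* Trim the freshly posted skb to the number of bytes actually
	 * received; hdr_space is IPOIB_CM_HEAD_SIZE on the CM path.
	 */
	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);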

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fe250c6..138f1a3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -138,7 +138,7 @@ struct ipoib_mcast {
 
 struct ipoib_rx_buf {
 	struct sk_buff *skb;
-	u64		mapping;
+	u64		mapping[IPOIB_CM_RX_SG];
 };
 
 struct ipoib_tx_buf {
@@ -189,7 +189,7 @@ enum ipoib_cm_state {
 struct ipoib_cm_rx {
 	struct ib_cm_id	       *id;
 	struct ib_qp	       *qp;
-	struct ipoib_cm_rx_buf *rx_ring;
+	struct ipoib_rx_buf    *rx_ring;
 	struct list_head	list;
 	struct net_device      *dev;
 	unsigned long		jiffies;
@@ -212,11 +212,7 @@ struct ipoib_cm_tx {
 	struct ib_wc	     ibwc[IPOIB_NUM_WC];
 };
 
-struct ipoib_cm_rx_buf {
-	struct sk_buff *skb;
-	u64 mapping[IPOIB_CM_RX_SG];
-};
-
 struct ipoib_cm_dev_priv {
 	struct ib_srq	       *srq;
-	struct ipoib_cm_rx_buf *srq_ring;
+	struct ipoib_rx_buf    *srq_ring;
@@ -458,6 +453,22 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+		   unsigned int length, struct sk_buff *toskb);
+struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+				      int id, int frags, int head_size,
+				      int pad, u64 *mapping);
+static inline void ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
+				       int head_size, u64 *mapping)
+{
+	int i;
+	ib_dma_unmap_single(priv->ca, mapping[0], head_size, DMA_FROM_DEVICE);
+	for (i = 0; i < frags; i++)
+		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE,
+				    DMA_FROM_DEVICE);
+
+}
+
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1818f95..c7d42ea 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -77,17 +77,6 @@ static struct ib_send_wr ipoib_cm_rx_drain_wr = {
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			       struct ib_cm_event *event);
 
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
-				  u64 mapping[IPOIB_CM_RX_SG])
-{
-	int i;
-
-	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
-	for (i = 0; i < frags; ++i)
-		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
-}
-
 static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -102,8 +91,9 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
-				      priv->cm.srq_ring[id].mapping);
+		ipoib_dma_unmap_rx(priv, priv->cm.num_frags - 1,
+				   IPOIB_CM_HEAD_SIZE,
+				   priv->cm.srq_ring[id].mapping);
 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
 		priv->cm.srq_ring[id].skb = NULL;
 	}
@@ -126,8 +116,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
 	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
-		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-				      rx->rx_ring[id].mapping);
+		ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, IPOIB_CM_HEAD_SIZE,
+				   rx->rx_ring[id].mapping);
 		dev_kfree_skb_any(rx->rx_ring[id].skb);
 		rx->rx_ring[id].skb = NULL;
 	}
@@ -135,69 +125,17 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
 	return ret;
 }
 
-static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
-					     struct ipoib_cm_rx_buf *rx_ring,
-					     int id, int frags,
-					     u64 mapping[IPOIB_CM_RX_SG])
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-	int i;
-
-	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
-	if (unlikely(!skb))
-		return NULL;
-
-	/*
-	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
-	 * IP header to a multiple of 16.
-	 */
-	skb_reserve(skb, 12);
-
-	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
-				       DMA_FROM_DEVICE);
-	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-
-	for (i = 0; i < frags; i++) {
-		struct page *page = alloc_page(GFP_ATOMIC);
-
-		if (!page)
-			goto partial_error;
-		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
-
-		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
-						 0, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
-			goto partial_error;
-	}
-
-	rx_ring[id].skb = skb;
-	return skb;
-
-partial_error:
-
-	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
-	for (; i > 0; --i)
-		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
-
-	dev_kfree_skb_any(skb);
-	return NULL;
-}
-
 static void ipoib_cm_free_rx_ring(struct net_device *dev,
-				  struct ipoib_cm_rx_buf *rx_ring)
+				  struct ipoib_rx_buf *rx_ring)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i;
 
 	for (i = 0; i < ipoib_recvq_size; ++i)
 		if (rx_ring[i].skb) {
-			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
-					      rx_ring[i].mapping);
+			ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+					   IPOIB_CM_HEAD_SIZE,
+					   rx_ring[i].mapping);
 			dev_kfree_skb_any(rx_ring[i].skb);
 		}
 
@@ -345,8 +283,12 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 	spin_unlock_irq(&priv->lock);
 
 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
-					   rx->rx_ring[i].mapping)) {
+		rx->rx_ring[i].skb = ipoib_cm_alloc_rx_skb(dev, i,
+							   IPOIB_CM_RX_SG - 1,
+							   IPOIB_CM_HEAD_SIZE, 
+							   12,
+							   rx->rx_ring[i].mapping);
+		if (!rx->rx_ring[i].skb) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 				ret = -ENOMEM;
 				goto err_count;
@@ -480,43 +422,11 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 		return 0;
 	}
 }
-/* Adjust length of skb with fragments to match received data */
-static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-			  unsigned int length, struct sk_buff *toskb)
-{
-	int i, num_frags;
-	unsigned int size;
-
-	/* put header into skb */
-	size = min(length, hdr_space);
-	skb->tail += size;
-	skb->len += size;
-	length -= size;
-
-	num_frags = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < num_frags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		if (length == 0) {
-			/* don't need this page */
-			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
-			--skb_shinfo(skb)->nr_frags;
-		} else {
-			size = min(length, (unsigned) PAGE_SIZE);
-
-			frag->size = size;
-			skb->data_len += size;
-			skb->truesize += size;
-			skb->len += size;
-			length -= size;
-		}
-	}
-}
 
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	struct ipoib_cm_rx_buf *rx_ring;
+	struct ipoib_rx_buf *rx_ring;
 	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
 	struct sk_buff *skb, *newskb;
 	struct ipoib_cm_rx *p;
@@ -581,7 +491,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
 					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
 
-	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
+				       12, mapping);
 	if (unlikely(!newskb)) {
 		/*
 		 * If we can't allocate a new RX buffer, dump
@@ -592,7 +503,10 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		goto repost;
 	}
 
-	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
+	rx_ring[wr_id].skb = newskb;
+
+	ipoib_dma_unmap_rx(priv, frags, IPOIB_CM_HEAD_SIZE,
+			   rx_ring[wr_id].mapping);
 	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
 
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
@@ -1481,9 +1395,12 @@ int ipoib_cm_dev_init(struct net_device *dev)
 
 	if (ipoib_cm_has_srq(dev)) {
 		for (i = 0; i < ipoib_recvq_size; ++i) {
-			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
-						   priv->cm.num_frags - 1,
-						   priv->cm.srq_ring[i].mapping)) {
+			priv->cm.srq_ring[i].skb =
+			      ipoib_cm_alloc_rx_skb(dev, i,
+						    priv->cm.num_frags - 1,
+						    IPOIB_CM_HEAD_SIZE, 12,
+						    priv->cm.srq_ring[i].mapping);
+			if (!priv->cm.srq_ring[i].skb) {
 				ipoib_warn(priv, "failed to allocate "
 					   "receive buffer %d\n", i);
 				ipoib_cm_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 52bc2bd..138c758 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -116,6 +116,91 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
 	return ret;
 }
 
+struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+				      int id, int frags, int head_size,
+				      int pad, u64 *mapping)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	int i;
+
+	skb = dev_alloc_skb(head_size + pad);
+	if (unlikely(!skb))
+		return NULL;
+
+	/*
+	 * IPoIB adds a 4 byte header, so we need 'pad' more bytes to align
+	 * the IP header to a multiple of 16: connected mode passes a pad
+	 * of 12, datagram (UD) mode passes a pad of 4.
+	 */
+	skb_reserve(skb, pad);
+
+	mapping[0] = ib_dma_map_single(priv->ca, skb->data, head_size,
+				       DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	for (i = 0; i < frags; i++) {
+		struct page *page = alloc_page(GFP_ATOMIC);
+
+		if (!page)
+			goto partial_error;
+		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+
+		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
+						 0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
+			goto partial_error;
+	}
+
+	return skb;
+
+partial_error:
+
+	ib_dma_unmap_single(priv->ca, mapping[0], head_size, DMA_FROM_DEVICE);
+
+	for (; i > 0; --i)
+		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
+
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
+/* Adjust length of skb with fragments to match received data */
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+		   unsigned int length, struct sk_buff *toskb)
+{
+	int i, num_frags;
+	unsigned int size;
+
+	/* put header into skb */
+	size = min(length, hdr_space);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	for (i = 0; i < num_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		if (length == 0) {
+			/* don't need this page */
+			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
+			--skb_shinfo(skb)->nr_frags;
+		} else {
+			size = min(length, (unsigned) PAGE_SIZE);
+
+			frag->size = size;
+			skb->data_len += size;
+			skb->truesize += size;
+			skb->len += size;
+			length -= size;
+		}
+	}
+}
+
 static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
