[ofa-general] [V3][PATCH 1/3] ib/ipoib: UD RX S/G API

Shirley Ma mashirle at us.ibm.com
Sat Feb 2 10:38:30 PST 2008


This patch adds a couple of helpers for UD RX S/G (scatter/gather), to be
used by the later patches in this series: ipoib_sg_alloc_rx_skb() allocates
and DMA-maps a receive skb with a small linear head plus one page fragment,
and ipoib_ud_skb_put_frags() trims such an skb to the length actually
received. Two S/G entries are enough for a 4K IB MTU: the 44-byte GRH plus
IPoIB encapsulation header lands in the linear head, and the remaining
payload fits in a single page.

Signed-off-by: Shirley Ma <xma at us.ibm.com>
---

 drivers/infiniband/ulp/ipoib/ipoib.h    |    9 ++++
 drivers/infiniband/ulp/ipoib/ipoib_ib.c |   65 +++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 0 deletions(-)

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fe250c6..415bf9a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -61,6 +61,10 @@ enum {
 
 	IPOIB_ENCAP_LEN		  = 4,
 
+	IPOIB_MAX_IB_MTU	  = 4096,
+	IPOIB_UD_HEAD_SIZE	  = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+	IPOIB_UD_RX_SG		  = 2, /* for 4K MTU */
+
 	IPOIB_CM_MTU		  = 0x10000 - 0x10, /* padding to align header to 16 */
 	IPOIB_CM_BUF_SIZE	  = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
 	IPOIB_CM_HEAD_SIZE	  = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -136,6 +140,11 @@ struct ipoib_mcast {
 	struct net_device *dev;
 };
 
+struct ipoib_sg_rx_buf {
+	struct sk_buff *skb;
+	u64		mapping[IPOIB_UD_RX_SG];
+};
+
 struct ipoib_rx_buf {
 	struct sk_buff *skb;
 	u64		mapping;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 52bc2bd..9ca3d34 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -87,6 +87,71 @@ void ipoib_free_ah(struct kref *kref)
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+/* Adjust length of skb with fragments to match received data */
+static void ipoib_ud_skb_put_frags(struct sk_buff *skb, unsigned int length,
+				   struct sk_buff *toskb)
+{
+	unsigned int size;
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+	/* put header into skb */
+	size = min(length, (unsigned)IPOIB_UD_HEAD_SIZE);
+	skb->tail += size;
+	skb->len += size;
+	length -= size;
+
+	if (length == 0) {
+		/* datagram fit in the head; donate unused page to toskb */
+		skb_fill_page_desc(toskb, 0, frag->page, 0, PAGE_SIZE);
+		--skb_shinfo(skb)->nr_frags;
+	} else {
+		size = min(length, (unsigned) PAGE_SIZE);
+		frag->size = size;
+		skb->data_len += size;
+		skb->truesize += size;
+		skb->len += size;
+		length -= size;
+	}
+}
+
+static struct sk_buff *ipoib_sg_alloc_rx_skb(struct net_device *dev,
+					     int id, u64 mapping[IPOIB_UD_RX_SG])
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct page *page;
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(IPOIB_UD_HEAD_SIZE);
+
+	if (unlikely(!skb))
+		return NULL;
+
+	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_UD_HEAD_SIZE,
+				       DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	page = alloc_page(GFP_ATOMIC);
+	if (!page)
+		goto partial_error;
+
+	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+	mapping[1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+				     0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+		goto partial_error;
+
+	priv->rx_ring[id].skb = skb;
+	return skb;
+
+partial_error:
+	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
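
For reviewers following the series, here is a minimal sketch of how these
two helpers are expected to plug into the UD receive completion path. It is
illustrative only: the handler name example_ipoib_handle_rx_wc, the switch
of priv->rx_ring over to struct ipoib_sg_rx_buf, and the drop-and-repost
logic are assumptions modeled on the existing non-S/G receive path; they are
not part of this patch.

/*
 * Sketch (not part of this patch): how the helpers above might be
 * driven from the receive completion handler, assuming patch 2/3
 * converts priv->rx_ring to struct ipoib_sg_rx_buf.
 */
static void example_ipoib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb = priv->rx_ring[wr_id].skb;
	u64 mapping[IPOIB_UD_RX_SG];

	/* save the old mappings; ipoib_sg_alloc_rx_skb() writes new
	 * ones into priv->rx_ring[wr_id].mapping */
	memcpy(mapping, priv->rx_ring[wr_id].mapping, sizeof(mapping));

	/* allocate the replacement before unmapping the old skb, so an
	 * allocation failure just drops this packet and reposts the
	 * old, still-mapped buffer */
	if (unlikely(!ipoib_sg_alloc_rx_skb(dev, wr_id,
					    priv->rx_ring[wr_id].mapping))) {
		++dev->stats.rx_dropped;
		memcpy(priv->rx_ring[wr_id].mapping, mapping,
		       sizeof(mapping));
		goto repost;
	}

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
			    DMA_FROM_DEVICE);
	ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE);

	/* trim skb to wc->byte_len; if the whole datagram fit in the
	 * 44-byte head, the unused page is donated to the new ring skb */
	ipoib_ud_skb_put_frags(skb, wc->byte_len, priv->rx_ring[wr_id].skb);

	/* ... strip the GRH and encapsulation header, set skb->protocol,
	 * and hand the skb to the stack, as the existing path does ... */

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
			   wr_id);
}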
