[ofa-general] [UPDATE][PATCH 1/3] ib/ipoib: Make IPoIB-CM RX S/G APIs more generic (for-2.6.25)
Shirley Ma
mashirle at us.ibm.com
Thu Jan 31 10:58:26 PST 2008
This patch makes the IPoIB-CM RX S/G APIs more generic so that they can be
reused later by IPoIB-UD RX S/G.
Signed-off-by: Shirley Ma <xma at us.ibm.com>
---
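As a quick aside (not part of the patch itself): a minimal sketch of how the
UD receive path might call the now-generic allocator once UD S/G support is
wired up later in this series. The IPOIB_UD_HEAD_SIZE constant, the
zero-fragment case and the helper name below are assumptions for
illustration only.

	/*
	 * Hypothetical UD-side caller of the generic allocator (sketch only).
	 * IPOIB_UD_HEAD_SIZE is an assumed constant; the actual UD conversion
	 * is done by a later patch in this series.
	 */
	static int ipoib_ud_alloc_rx_skb_sketch(struct net_device *dev, int id)
	{
		struct ipoib_dev_priv *priv = netdev_priv(dev);
		struct sk_buff *skb;

		/* UD mode: no page fragments, 4 byte pad aligns the IP header */
		skb = ipoib_cm_alloc_rx_skb(dev, id, 0, IPOIB_UD_HEAD_SIZE, 4,
					    &priv->rx_ring[id].mapping);
		if (unlikely(!skb))
			return -ENOMEM;

		priv->rx_ring[id].skb = skb;
		return 0;
	}
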
drivers/infiniband/ulp/ipoib/ipoib.h | 26 +++++-
drivers/infiniband/ulp/ipoib/ipoib_cm.c | 135 ++++++-------------------------
drivers/infiniband/ulp/ipoib/ipoib_ib.c | 85 +++++++++++++++++++
3 files changed, 132 insertions(+), 114 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index fe250c6..d1d3ca2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -141,6 +141,11 @@ struct ipoib_rx_buf {
u64 mapping;
};
+struct ipoib_cm_rx_buf {
+ struct sk_buff *skb;
+ u64 mapping[IPOIB_CM_RX_SG];
+};
+
struct ipoib_tx_buf {
struct sk_buff *skb;
u64 mapping;
@@ -212,11 +217,6 @@ struct ipoib_cm_tx {
struct ib_wc ibwc[IPOIB_NUM_WC];
};
-struct ipoib_cm_rx_buf {
- struct sk_buff *skb;
- u64 mapping[IPOIB_CM_RX_SG];
-};
-
struct ipoib_cm_dev_priv {
struct ib_srq *srq;
struct ipoib_cm_rx_buf *srq_ring;
@@ -458,6 +458,22 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_drain_cq(struct net_device *dev);
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length, struct sk_buff *toskb);
+struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+ int id, int frags, int head_size,
+ int pad, u64 *mapping);
+static inline void ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
+ int head_size, u64 *mapping)
+{
+ int i;
+ ib_dma_unmap_single(priv->ca, mapping[0], head_size, DMA_FROM_DEVICE);
+ for (i = 0; i < frags; i++)
+ ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+}
+
#ifdef CONFIG_INFINIBAND_IPOIB_CM
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1818f95..2c2c6b2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -77,17 +77,6 @@ static struct ib_send_wr ipoib_cm_rx_drain_wr = {
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event);
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
- u64 mapping[IPOIB_CM_RX_SG])
-{
- int i;
-
- ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
- for (i = 0; i < frags; ++i)
- ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
-}
-
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -102,8 +91,9 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
- ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
- priv->cm.srq_ring[id].mapping);
+ ipoib_dma_unmap_rx(priv, priv->cm.num_frags - 1,
+ IPOIB_CM_HEAD_SIZE,
+ priv->cm.srq_ring[id].mapping);
dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
priv->cm.srq_ring[id].skb = NULL;
}
@@ -126,8 +116,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
- ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
- rx->rx_ring[id].mapping);
+ ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, IPOIB_CM_HEAD_SIZE,
+ rx->rx_ring[id].mapping);
dev_kfree_skb_any(rx->rx_ring[id].skb);
rx->rx_ring[id].skb = NULL;
}
@@ -135,59 +125,6 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
return ret;
}
-static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
- struct ipoib_cm_rx_buf *rx_ring,
- int id, int frags,
- u64 mapping[IPOIB_CM_RX_SG])
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct sk_buff *skb;
- int i;
-
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
- if (unlikely(!skb))
- return NULL;
-
- /*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
- * IP header to a multiple of 16.
- */
- skb_reserve(skb, 12);
-
- mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- for (i = 0; i < frags; i++) {
- struct page *page = alloc_page(GFP_ATOMIC);
-
- if (!page)
- goto partial_error;
- skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
-
- mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
- 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
- goto partial_error;
- }
-
- rx_ring[id].skb = skb;
- return skb;
-
-partial_error:
-
- ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-
- for (; i > 0; --i)
- ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
-
- dev_kfree_skb_any(skb);
- return NULL;
-}
-
static void ipoib_cm_free_rx_ring(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring)
{
@@ -196,8 +133,9 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
for (i = 0; i < ipoib_recvq_size; ++i)
if (rx_ring[i].skb) {
- ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
- rx_ring[i].mapping);
+ ipoib_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+ IPOIB_CM_HEAD_SIZE,
+ rx_ring[i].mapping);
dev_kfree_skb_any(rx_ring[i].skb);
}
@@ -345,8 +283,12 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
- rx->rx_ring[i].mapping)) {
+ rx->rx_ring[i].skb = ipoib_cm_alloc_rx_skb(dev, i,
+ IPOIB_CM_RX_SG - 1,
+ IPOIB_CM_HEAD_SIZE,
+ 12,
+ rx->rx_ring[i].mapping);
+ if (!rx->rx_ring[i].skb) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
@@ -480,38 +422,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
return 0;
}
}
-/* Adjust length of skb with fragments to match received data */
-static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
- unsigned int length, struct sk_buff *toskb)
-{
- int i, num_frags;
- unsigned int size;
-
- /* put header into skb */
- size = min(length, hdr_space);
- skb->tail += size;
- skb->len += size;
- length -= size;
-
- num_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- if (length == 0) {
- /* don't need this page */
- skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
- --skb_shinfo(skb)->nr_frags;
- } else {
- size = min(length, (unsigned) PAGE_SIZE);
-
- frag->size = size;
- skb->data_len += size;
- skb->truesize += size;
- skb->len += size;
- length -= size;
- }
- }
-}
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
@@ -581,7 +491,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
+ newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
+ 12, mapping);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -592,7 +503,10 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
goto repost;
}
- ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
+ rx_ring[wr_id].skb = newskb;
+
+ ipoib_dma_unmap_rx(priv, frags, IPOIB_CM_HEAD_SIZE,
+ rx_ring[wr_id].mapping);
memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
@@ -1481,9 +1395,12 @@ int ipoib_cm_dev_init(struct net_device *dev)
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
- priv->cm.num_frags - 1,
- priv->cm.srq_ring[i].mapping)) {
+ priv->cm.srq_ring[i].skb =
+ ipoib_cm_alloc_rx_skb(dev, i,
+ priv->cm.num_frags - 1,
+ IPOIB_CM_HEAD_SIZE, 12,
+ priv->cm.srq_ring[i].mapping);
+ if (!priv->cm.srq_ring[i].skb) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 52bc2bd..c40329f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -116,6 +116,91 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
return ret;
}
+struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+ int id, int frags, int head_size,
+ int pad, u64 *mapping)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ skb = dev_alloc_skb(head_size + pad);
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+ * IPoIB adds a 4 byte header, so we reserve 'pad' extra bytes to align
+ * the IP header to a multiple of 16: CM mode passes a pad of 12,
+ * UD mode a pad of 4.
+ */
+ skb_reserve(skb, pad);
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, head_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ for (i = 0; i < frags; i++) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+
+ if (!page)
+ goto partial_error;
+ skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+
+ mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
+ goto partial_error;
+ }
+
+ return skb;
+
+partial_error:
+
+ ib_dma_unmap_single(priv->ca, mapping[0], head_size, DMA_FROM_DEVICE);
+
+ for (; i > 0; --i)
+ ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+/* Adjust length of skb with fragments to match received data */
+void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length, struct sk_buff *toskb)
+{
+ int i, num_frags;
+ unsigned int size;
+
+ /* put header into skb */
+ size = min(length, hdr_space);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (length == 0) {
+ /* don't need this page */
+ skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min(length, (unsigned) PAGE_SIZE);
+
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ }
+ }
+}
+
static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);