[ofa-general] [PATCH 3/3] ib/ipoib: IPoIB-UD RX S/G support
Shirley Ma
mashirle@us.ibm.com
Wed Jan 30 12:30:08 PST 2008
Signed-off-by: Shirley Ma <xma@us.ibm.com>
---
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 65b1159..969955e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -463,9 +463,9 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_drain_cq(struct net_device *dev);
void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
unsigned int length, struct sk_buff *toskb);
-struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
- int id, int frags, int head_size,
- int pad, u64 *mapping);
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev,
+ int id, int frags, int head_size,
+ int pad, u64 *mapping);
void inline ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
int head_size, u64 *mapping)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c7d42ea..a9af796 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -283,11 +283,10 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
- rx->rx_ring[i].skb = ipoib_cm_alloc_rx_skb(dev, i,
- IPOIB_CM_RX_SG - 1,
- IPOIB_CM_HEAD_SIZE,
- 12,
- rx->rx_ring[i].mapping);
+ rx->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+ IPOIB_CM_RX_SG - 1,
+ IPOIB_CM_HEAD_SIZE, 12,
+ rx->rx_ring[i].mapping);
if (!rx->rx_ring[i].skb) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
@@ -491,8 +490,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
- 12, mapping);
+ newskb = ipoib_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
+ 12, mapping);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -1396,10 +1395,10 @@ int ipoib_cm_dev_init(struct net_device *dev)
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
priv->cm.srq_ring[i].skb =
- ipoib_cm_alloc_rx_skb(dev, i,
- priv->cm.num_frags - 1,
- IPOIB_CM_HEAD_SIZE, 12,
- priv->cm.srq_ring[i].mapping);
+ ipoib_alloc_rx_skb(dev, i,
+ priv->cm.num_frags - 1,
+ IPOIB_CM_HEAD_SIZE, 12,
+ priv->cm.srq_ring[i].mapping);
if (!priv->cm.srq_ring[i].skb) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 138c758..d6967ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -90,25 +90,19 @@ void ipoib_free_ah(struct kref *kref)
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_sge list;
- struct ib_recv_wr param;
struct ib_recv_wr *bad_wr;
int ret;
+ int i;
- list.addr = priv->rx_ring[id].mapping;
- list.length = IPOIB_BUF_SIZE;
- list.lkey = priv->mr->lkey;
-
- param.next = NULL;
- param.wr_id = id | IPOIB_OP_RECV;
- param.sg_list = &list;
- param.num_sge = 1;
-
- ret = ib_post_recv(priv->qp, &param, &bad_wr);
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ for (i = 0; i < IPOIB_UD_RX_SG(priv->max_ib_mtu); ++i)
+ priv->rx_sge[i].addr = priv->rx_ring[id].mapping[i];
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -116,9 +109,9 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
return ret;
}
-static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
- int id, int frags, int head_size,
- int pad, u64 *mapping)
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev,
+ int id, int frags, int head_size,
+ int pad, u64 *mapping)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
@@ -201,43 +194,17 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
}
}
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct sk_buff *skb;
- u64 addr;
-
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
- if (!skb)
- return -ENOMEM;
-
- /*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
- */
- skb_reserve(skb, 4);
-
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
- dev_kfree_skb_any(skb);
- return -EIO;
- }
-
- priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
-
- return 0;
-}
-
static int ipoib_ib_post_receives(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ priv->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+ IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4,
+ priv->rx_ring[i].mapping);
+ if (!priv->rx_ring[i].skb) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -254,8 +221,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
- u64 addr;
+ u64 mapping[IPOIB_UD_RX_SG(priv->max_ib_mtu)];
+ int frags;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -267,15 +235,15 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -288,11 +256,18 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
goto repost;
+ frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+ (unsigned)IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu))) / PAGE_SIZE;
+ newskb = ipoib_alloc_rx_skb(dev, wr_id, frags,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ 4, mapping);
+
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ if (unlikely(!newskb)) {
+ ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
++dev->stats.rx_dropped;
goto repost;
}
@@ -300,9 +275,12 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_dma_unmap_rx(priv, frags, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[wr_id].mapping);
+ memcpy(priv->rx_ring[wr_id].mapping, mapping,
+ (frags + 1) * sizeof *mapping);
- skb_put(skb, wc->byte_len);
+ skb_put_frags(skb, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), wc->byte_len, newskb);
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -715,10 +693,10 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
- ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ ipoib_dma_unmap_rx(priv,
+ IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[i].mapping);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}