[ofa-general] [V2][PATCH 3/3] ib/ipoib: IPoIB-UD RX S/G support
Shirley Ma
mashirle at us.ibm.com
Sat Feb 2 04:39:17 PST 2008
This patch keeps the existing 2K MTU IPoIB-UD implementation, which continues to
be used both for 2K MTU and for 4K MTU when no S/G is required (i.e. when the
receive buffer still fits in a single page). RX S/G is enabled for 4K MTU only
when the UD receive buffer exceeds PAGE_SIZE.
Signed-off-by: Shirley Ma <xma at us.ibm.com>
---
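For reviewers, a worked example of the UD receive buffer-size math used by this
patch (illustration only, not part of the patch; a userspace sketch that mirrors
the ipoib.h macros below, assuming 4K pages and the 40-byte GRH):

/* Illustration only: userspace sketch of the IPoIB-UD RX buffer math,
 * assuming PAGE_SIZE is 4096 and IB_GRH_BYTES is 40. The macros mirror
 * the ipoib.h definitions introduced below. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define IB_GRH_BYTES	40UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define IPOIB_UD_BUF_SIZE(ib_mtu)	((ib_mtu) + IB_GRH_BYTES)
/* the extra 4 bytes keep the IP header aligned */
#define IPOIB_UD_HEAD_SIZE(ib_mtu)	((IPOIB_UD_BUF_SIZE(ib_mtu) + 4) % PAGE_SIZE)
#define IPOIB_UD_RX_SG(ib_mtu)		(ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu) + 4, PAGE_SIZE) / PAGE_SIZE)

static int ipoib_ud_need_sg(unsigned long ib_mtu)
{
	return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
}

int main(void)
{
	unsigned long mtus[] = { 2048, 4096 };

	for (int i = 0; i < 2; i++) {
		unsigned long m = mtus[i];

		printf("ib_mtu %lu: buf %lu, need_sg %d, head %lu, sge %lu\n",
		       m, IPOIB_UD_BUF_SIZE(m), ipoib_ud_need_sg(m),
		       IPOIB_UD_HEAD_SIZE(m), IPOIB_UD_RX_SG(m));
	}
	return 0;
}

With 4K pages this prints need_sg 0 for 2K MTU (a single 2088-byte buffer, i.e.
the existing path; the head/SG macros are not used there) and need_sg 1 for 4K
MTU, where the buffer becomes a 44-byte head (GRH plus the 4-byte alignment pad)
plus one full page fragment, for two scatter/gather entries.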
drivers/infiniband/ulp/ipoib/ipoib.h | 28 ++++-----
drivers/infiniband/ulp/ipoib/ipoib_cm.c | 10 ++--
drivers/infiniband/ulp/ipoib/ipoib_ib.c | 108 ++++++++++++++++++++++---------
3 files changed, 95 insertions(+), 51 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 004a80b..6c33d7d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,9 +56,6 @@
/* constants */
enum {
- IPOIB_PACKET_SIZE = 2048,
- IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
IPOIB_ENCAP_LEN = 4,
IPOIB_MAX_IB_MTU = 4096,
@@ -140,12 +137,7 @@ struct ipoib_mcast {
struct net_device *dev;
};
-struct ipoib_rx_buf {
- struct sk_buff *skb;
- u64 mapping;
-};
-
-struct ipoib_cm_rx_buf {
+struct ipoib_sg_rx_buf {
struct sk_buff *skb;
u64 mapping[IPOIB_CM_RX_SG];
};
@@ -198,7 +190,7 @@ enum ipoib_cm_state {
struct ipoib_cm_rx {
struct ib_cm_id *id;
struct ib_qp *qp;
- struct ipoib_cm_rx_buf *rx_ring;
+ struct ipoib_sg_rx_buf *rx_ring;
struct list_head list;
struct net_device *dev;
unsigned long jiffies;
@@ -223,7 +215,7 @@ struct ipoib_cm_tx {
struct ipoib_cm_dev_priv {
struct ib_srq *srq;
- struct ipoib_cm_rx_buf *srq_ring;
+ struct ipoib_sg_rx_buf *srq_ring;
struct ib_cm_id *id;
struct list_head passive_ids; /* state: LIVE */
struct list_head rx_error_list; /* state: ERROR */
@@ -294,7 +286,7 @@ struct ipoib_dev_priv {
unsigned int admin_mtu;
unsigned int mcast_mtu;
- struct ipoib_rx_buf *rx_ring;
+ struct ipoib_sg_rx_buf *rx_ring;
spinlock_t tx_lock;
struct ipoib_tx_buf *tx_ring;
@@ -367,10 +359,14 @@ struct ipoib_neigh {
};
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
/* padding to align IP header */
-#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES + 4)
-#define IPOIB_UD_HEAD_SIZE(ib_mtu) (IPOIB_UD_BUF_SIZE(ib_mtu)) % PAGE_SIZE
-#define IPOIB_UD_RX_SG(ib_mtu) ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu), PAGE_SIZE) / PAGE_SIZE
+#define IPOIB_UD_HEAD_SIZE(ib_mtu) (IPOIB_UD_BUF_SIZE(ib_mtu) + 4) % PAGE_SIZE
+#define IPOIB_UD_RX_SG(ib_mtu) ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu) + 4, PAGE_SIZE) / PAGE_SIZE
+static inline int ipoib_ud_need_sg(int ib_mtu)
+{
+ return (IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE) ? 1 : 0;
+}
/*
* We stash a pointer to our private neighbour information after our
@@ -473,7 +469,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_drain_cq(struct net_device *dev);
void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
unsigned int length, struct sk_buff *toskb);
-struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+struct sk_buff *ipoib_alloc_sg_rx_skb(struct net_device *dev,
int id, int frags, int head_size,
int pad, u64 *mapping);
static void inline ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2c2c6b2..4667f70 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -126,7 +126,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
}
static void ipoib_cm_free_rx_ring(struct net_device *dev,
- struct ipoib_cm_rx_buf *rx_ring)
+ struct ipoib_sg_rx_buf *rx_ring)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i;
@@ -283,7 +283,7 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
- rx->rx_ring[i].skb = ipoib_cm_alloc_rx_skb(dev, i,
+ rx->rx_ring[i].skb = ipoib_alloc_sg_rx_skb(dev, i,
IPOIB_CM_RX_SG - 1,
IPOIB_CM_HEAD_SIZE,
12,
@@ -426,7 +426,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_cm_rx_buf *rx_ring;
+ struct ipoib_sg_rx_buf *rx_ring;
unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
struct sk_buff *skb, *newskb;
struct ipoib_cm_rx *p;
@@ -491,7 +491,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
- newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
+ newskb = ipoib_alloc_sg_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE,
12, mapping);
if (unlikely(!newskb)) {
/*
@@ -1396,7 +1396,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
priv->cm.srq_ring[i].skb =
- ipoib_cm_alloc_rx_skb(dev, i,
+ ipoib_alloc_sg_rx_skb(dev, i,
priv->cm.num_frags - 1,
IPOIB_CM_HEAD_SIZE, 12,
priv->cm.srq_ring[i].mapping);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index c40329f..dcdb042 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -95,8 +95,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
struct ib_recv_wr *bad_wr;
int ret;
- list.addr = priv->rx_ring[id].mapping;
- list.length = IPOIB_BUF_SIZE;
+ list.addr = priv->rx_ring[id].mapping[0];
+ list.length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
list.lkey = priv->mr->lkey;
param.next = NULL;
@@ -104,19 +104,29 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
param.sg_list = &list;
param.num_sge = 1;
- ret = ib_post_recv(priv->qp, &param, &bad_wr);
- if (unlikely(ret)) {
- ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_ring[id].skb);
- priv->rx_ring[id].skb = NULL;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+ ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[id].mapping);
+ dev_kfree_skb_any(priv->rx_ring[id].skb);
+ priv->rx_ring[id].skb = NULL;
+ }
+ } else {
+ ret = ib_post_recv(priv->qp, &param, &bad_wr);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+ ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_ring[id].skb);
+ priv->rx_ring[id].skb = NULL;
+ }
}
return ret;
}
-struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+struct sk_buff *ipoib_alloc_sg_rx_skb(struct net_device *dev,
int id, int frags, int head_size,
int pad, u64 *mapping)
{
@@ -207,7 +217,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct sk_buff *skb;
u64 addr;
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
+ skb = dev_alloc_skb(IPOIB_UD_BUF_SIZE(priv->max_ib_mtu) + 4);
if (!skb)
return -ENOMEM;
@@ -218,7 +228,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/
skb_reserve(skb, 4);
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+ addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
dev_kfree_skb_any(skb);
@@ -226,7 +236,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
}
priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
+ priv->rx_ring[id].mapping[0] = addr;
return 0;
}
@@ -237,7 +247,17 @@ static int ipoib_ib_post_receives(struct net_device *dev)
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ priv->rx_ring[i].skb
+ = ipoib_alloc_sg_rx_skb(dev, i,
+ IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4,
+ priv->rx_ring[i].mapping);
+ if (!priv->rx_ring[i].skb) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ return -ENOMEM;
+ }
+ } else if (ipoib_alloc_rx_skb(dev, i)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -254,8 +274,10 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
- struct sk_buff *skb;
- u64 addr;
+ struct sk_buff *skb, *newskb = NULL;
+ u64 mapping[IPOIB_UD_RX_SG(priv->max_ib_mtu)];
+ int frags = 0;
+ u64 addr = 0;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -267,15 +289,21 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
+ if (!ipoib_ud_need_sg(priv->max_ib_mtu))
+ addr = priv->rx_ring[wr_id].mapping[0];
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[wr_id].mapping);
+ else
+ ib_dma_unmap_single(priv->ca, addr,
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -292,17 +320,32 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+ (unsigned)(IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu)))) / PAGE_SIZE;
+ newskb = ipoib_alloc_sg_rx_skb(dev, wr_id, frags,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4, mapping);
+ if (unlikely(!newskb)) {
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+ priv->rx_ring[wr_id].skb = newskb;
+ } else if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
++dev->stats.rx_dropped;
goto repost;
}
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
-
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-
- skb_put(skb, wc->byte_len);
+
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ ipoib_dma_unmap_rx(priv, frags, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[wr_id].mapping);
+ skb_put_frags(skb, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), wc->byte_len, newskb);
+ } else {
+ ib_dma_unmap_single(priv->ca, addr, IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
+ skb_put(skb, wc->byte_len);
+ }
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -710,15 +753,20 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
}
for (i = 0; i < ipoib_recvq_size; ++i) {
- struct ipoib_rx_buf *rx_req;
-
+ struct ipoib_sg_rx_buf *rx_req;
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
- ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ipoib_dma_unmap_rx(priv,
+ IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+ IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+ priv->rx_ring[i].mapping);
+ else
+ ib_dma_unmap_single(priv->ca,
+ rx_req->mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
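A worked example of the frag calculation added to ipoib_ib_handle_rx_wc above
(illustration only, not part of the patch; assumes 4K pages and 4K IB MTU, so
IPOIB_UD_HEAD_SIZE is 44 as in the earlier example):

/* Illustration only: the page-frag count computed for the replacement skb
 * in ipoib_ib_handle_rx_wc, assuming 4096-byte pages and a 4K IB MTU. */
#include <stdio.h>

#define PAGE_SIZE	4096U
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define HEAD_SIZE	44U	/* IPOIB_UD_HEAD_SIZE(4096) with 4K pages */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned byte_len[] = { 44, 1500, 4136 };

	for (int i = 0; i < 3; i++) {
		unsigned len = byte_len[i];
		unsigned frags = PAGE_ALIGN(len - min_u(len, HEAD_SIZE)) / PAGE_SIZE;

		printf("byte_len %u -> %u bytes in the linear head, %u page frag(s)\n",
		       len, min_u(len, HEAD_SIZE), frags);
	}
	return 0;
}

So with 4K pages a 4K-MTU completion never needs more than one page fragment
beyond the 44-byte head, matching IPOIB_UD_RX_SG(4096) == 2 above.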