[ewg] [PATCH] IPoIB-UD 4K MTU patch against 2.6.24 ofed-1.3-git tree
Shirley Ma
mashirle at us.ibm.com
Mon Feb 4 06:05:25 PST 2008
Tziporet,
This IPoIB 4K MTU patch is built against today's 2.6.24 OFED-1.3 git
tree. The patch was tested successfully before Eli's patch went in, and
this rebuilt version applies on top of Eli's patch. However, the
constant UD_POST_RCV_COUNT, which Eli's patch defines as 16, does
impact this patch's behavior: when I define it as 1, everything works
OK; if I change the value to 8 or bigger, the patch does not work well.
We do see a couple of issues now that Eli's patch has been checked in,
so I suggest checking this patch in as well; then we can work together
to address those issues tomorrow. For Eli's patch, I would suggest
using kzalloc() to allocate the 16 ib_sge and ib_recv_wr entries
instead of defining them as fixed arrays in ipoib_dev_priv, since the
embedded arrays might cause a memory issue there. I am working on that
change now to see whether it gives better results.
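For illustration, here is a minimal sketch of the kzalloc() suggestion
(where exactly to place the allocation, the pointer-to-array field
types, and the error label are my assumptions; only the
UD_POST_RCV_COUNT and IPOIB_UD_RX_SG sizing comes from the patches
themselves):

	/* Sketch: allocate the receive-posting drafts at init time
	 * instead of embedding fixed-size arrays in ipoib_dev_priv.
	 * The fields would become pointers:
	 *
	 *	struct ib_recv_wr *rx_wr_draft;
	 *	struct ib_sge (*sglist_draft)[IPOIB_UD_RX_SG];
	 */
	priv->rx_wr_draft = kzalloc(UD_POST_RCV_COUNT *
				    sizeof(*priv->rx_wr_draft), GFP_KERNEL);
	priv->sglist_draft = kzalloc(UD_POST_RCV_COUNT *
				     sizeof(*priv->sglist_draft), GFP_KERNEL);
	if (!priv->rx_wr_draft || !priv->sglist_draft)
		goto out_free_drafts;	/* hypothetical error path */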
Vlad,
There is one line to change for backporting, regarding priv->stats vs.
dev->stats. If you have any problem creating the backport patch, let me
know and I will ask Nam to help. The attachment is provided so that you
can apply the patch easily, since my email might mangle it.
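For example, the backport change would presumably look like this at
each place the patch touches the counters (a sketch, assuming the
older-kernel trees keep the statistics in ipoib_dev_priv rather than
in net_device):

	-	++dev->stats.rx_dropped;
	+	++priv->stats.rx_dropped;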
Thanks
Shirley
Signed-off-by: Shirley Ma <xma at us.ibm.com>
---
diff -urpN ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib.h ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib.h
--- ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib.h	2008-02-04 15:45:44.000000000 -0800
+++ ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib.h	2008-02-04 15:40:38.000000000 -0800
@@ -56,11 +56,11 @@
/* constants */
enum {
- IPOIB_PACKET_SIZE = 2048,
- IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
IPOIB_ENCAP_LEN = 4,
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* for 4K MTU */
+
IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -141,9 +141,9 @@ struct ipoib_mcast {
struct net_device *dev;
};
-struct ipoib_rx_buf {
+struct ipoib_sg_rx_buf {
struct sk_buff *skb;
- u64 mapping;
+ u64 mapping[IPOIB_UD_RX_SG];
};
struct ipoib_tx_buf {
@@ -337,7 +337,7 @@ struct ipoib_dev_priv {
struct net_device *dev;
struct ib_recv_wr rx_wr_draft[UD_POST_RCV_COUNT];
- struct ib_sge sglist_draft[UD_POST_RCV_COUNT];
+ struct ib_sge sglist_draft[UD_POST_RCV_COUNT][IPOIB_UD_RX_SG];
unsigned int rx_outst;
struct napi_struct napi;
@@ -378,7 +378,7 @@ struct ipoib_dev_priv {
unsigned int admin_mtu;
unsigned int mcast_mtu;
- struct ipoib_rx_buf *rx_ring;
+ struct ipoib_sg_rx_buf *rx_ring;
spinlock_t tx_lock;
struct ipoib_tx_buf *tx_ring;
@@ -412,6 +412,7 @@ struct ipoib_dev_priv {
struct ipoib_ethtool_st etool;
struct timer_list poll_timer;
struct ib_ah *own_ah;
+ int max_ib_mtu;
};
struct ipoib_ah {
@@ -452,6 +453,19 @@ struct ipoib_neigh {
struct list_head list;
};
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+static inline int ipoib_ud_need_sg(int ib_mtu)
+{
+ return (IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE) ? 1 : 0;
+}
+static inline void ipoib_sg_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE);
+}
+
/*
* We stash a pointer to our private neighbour information after our
* hardware address in neigh->ha. The ALIGN() expression here makes
diff -urpN ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-02-04 15:45:44.000000000 -0800
+++ ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2008-02-04 15:40:38.000000000 -0800
@@ -96,14 +96,82 @@ static void clean_pending_receives(struc
for (i = 0; i < priv->rx_outst; ++i) {
id = priv->rx_wr_draft[i].wr_id & ~IPOIB_OP_RECV;
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[id].mapping);
+ else
+ ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
priv->rx_outst = 0;
}
+static void ipoib_ud_skb_put_frags(struct sk_buff *skb, unsigned int length,
+ struct sk_buff *toskb)
+{
+ unsigned int size;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ /* put header into skb */
+ size = min(length, (unsigned)IPOIB_UD_HEAD_SIZE);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ if (length == 0) {
+ /* don't need this page */
+ skb_fill_page_desc(toskb, 0, frag->page, 0, PAGE_SIZE);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min(length, (unsigned) PAGE_SIZE);
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ }
+}
+
+static struct sk_buff *ipoib_sg_alloc_rx_skb(struct net_device *dev,
+ int id, u64 mapping[IPOIB_UD_RX_SG])
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct page *page;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(IPOIB_UD_HEAD_SIZE);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_UD_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto partial_error;
+
+ skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+ mapping[1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+ goto partial_error;
+
+ priv->rx_ring[id].skb = skb;
+ return skb;
+
+partial_error:
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -111,8 +179,11 @@ static int ipoib_ib_post_receive(struct
int ret = 0;
int i = priv->rx_outst;
- priv->sglist_draft[i].addr = priv->rx_ring[id].mapping;
+ priv->sglist_draft[i][0].addr = priv->rx_ring[id].mapping[0];
+ priv->sglist_draft[i][1].addr = priv->rx_ring[id].mapping[1];
+
priv->rx_wr_draft[i].wr_id = id | IPOIB_OP_RECV;
+
if (++priv->rx_outst == UD_POST_RCV_COUNT) {
ret = ib_post_recv(priv->qp, priv->rx_wr_draft, &bad_wr);
@@ -120,8 +191,13 @@ static int ipoib_ib_post_receive(struct
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
while (bad_wr) {
id = bad_wr->wr_id & ~IPOIB_OP_RECV;
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[id].mapping);
+ else
+ ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -138,7 +214,7 @@ static int ipoib_alloc_rx_skb(struct net
struct sk_buff *skb;
u64 addr;
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
+ skb = dev_alloc_skb(IPOIB_UD_BUF_SIZE(priv->max_ib_mtu) + 4);
if (!skb)
return -ENOMEM;
@@ -149,7 +225,8 @@ static int ipoib_alloc_rx_skb(struct net
*/
skb_reserve(skb, 4);
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+ addr = ib_dma_map_single(priv->ca, skb->data,
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
dev_kfree_skb_any(skb);
@@ -157,7 +234,7 @@ static int ipoib_alloc_rx_skb(struct net
}
priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
+ priv->rx_ring[id].mapping[0] = addr;
return 0;
}
@@ -165,10 +242,15 @@ static int ipoib_alloc_rx_skb(struct net
static int ipoib_ib_post_receives(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- int i;
+ int i, ret;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ret = !(ipoib_sg_alloc_rx_skb(dev, i,
+ priv->rx_ring[i].mapping));
+ else
+ ret = ipoib_alloc_rx_skb(dev, i);
+ if (ret) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -186,7 +268,7 @@ static void ipoib_ib_handle_rx_wc(struct
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- u64 addr;
+ u64 mapping[IPOIB_UD_RX_SG];
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -198,42 +280,74 @@ static void ipoib_ib_handle_rx_wc(struct
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- priv->rx_ring[wr_id].skb = NULL;
- return;
- }
+ /* duplicate the code here to keep the need-sg condition check out of the fast path */
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ struct sk_buff *newskb;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv, "failed recv event "
+ "(status=%d, wrid=%d vend_err %x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+ dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
+ return;
+ }
+ /*
+ * Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
+ goto repost;
+ newskb = ipoib_sg_alloc_rx_skb(dev, wr_id, mapping);
+ if (unlikely(!newskb)) {
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
+ ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+ memcpy(priv->rx_ring[wr_id].mapping, mapping,
+ IPOIB_UD_RX_SG * sizeof *mapping);
+ ipoib_ud_skb_put_frags(skb, wc->byte_len, newskb);
+ } else {
+ u64 addr = priv->rx_ring[wr_id].mapping[0];
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv, "failed recv event "
+ "(status=%d, wrid=%d vend_err %x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ ib_dma_unmap_single(priv->ca, addr,
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
+ return;
+ }
- /*
- * Drop packets that this interface sent, ie multicast packets
- * that the HCA has replicated.
- */
- if (unlikely(wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num))
- goto repost;
+ /*
+ * Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
+ */
+ if (unlikely(wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num))
+ goto repost;
- /*
- * If we can't allocate a new RX buffer, dump
- * this packet and reuse the old buffer.
- */
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
- ++dev->stats.rx_dropped;
- goto repost;
- }
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ ++dev->stats.rx_dropped;
+ goto repost;
+ }
- ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
- wc->byte_len, wc->slid);
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr, IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
- skb_put(skb, wc->byte_len);
+ skb_put(skb, wc->byte_len);
+ }
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -827,18 +941,21 @@ int ipoib_ib_dev_stop(struct net_device
* all our pending work requests.
*/
for (i = 0; i < ipoib_recvq_size; ++i) {
- struct ipoib_rx_buf *rx_req;
+ struct ipoib_sg_rx_buf *rx_req;
rx_req = &priv->rx_ring[i];
-
- if (rx_req->skb) {
+ if (!rx_req->skb)
+ continue;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
+ else
ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
+ rx_req->mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_req->skb);
- rx_req->skb = NULL;
- }
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
}
goto timeout;
diff -urpN ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_main.c ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_main.c
--- ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-02-04 15:45:44.000000000 -0800
+++ ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_main.c	2008-02-04 15:40:38.000000000 -0800
@@ -193,7 +193,7 @@ static int ipoib_change_mtu(struct net_d
return 0;
}
- if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+ if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -1007,10 +1007,6 @@ static void ipoib_setup(struct net_devic
dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
- /* MTU will be reset when mcast join happens */
- dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
- priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
@@ -1156,6 +1152,7 @@ static struct net_device *ipoib_add_port
struct ib_device *hca, u8 port)
{
struct ipoib_dev_priv *priv;
+ struct ib_port_attr attr;
int result = -ENOMEM;
priv = ipoib_intf_alloc(format);
@@ -1166,6 +1163,18 @@ static struct net_device *ipoib_add_port
priv->dev->features |= NETIF_F_HIGHDMA;
+ if (!ib_query_port(hca, port, &attr))
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ else {
+ printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+ hca->name, port);
+ goto device_init_failed;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff -urpN ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
--- ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-02-04 15:45:44.000000000 -0800
+++ ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2008-02-04 15:40:38.000000000 -0800
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_s
return;
}
- priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
- IPOIB_ENCAP_LEN;
+ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
if (!ipoib_cm_admin_enabled(dev))
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff -urpN ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
--- ofed_1_3_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2008-02-04 15:45:44.000000000 -0800
+++ ofed_1_3_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	2008-02-04 15:40:38.000000000 -0800
@@ -151,7 +151,7 @@ int ipoib_transport_dev_init(struct net_
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = dev->features & NETIF_F_SG ? MAX_SKB_FRAGS + 1 : 1,
- .max_recv_sge = 1
+ .max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_UD,
@@ -225,18 +225,29 @@ int ipoib_transport_dev_init(struct net_
priv->tx_wr.opcode = IB_WR_SEND;
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
-
+
for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
- priv->sglist_draft[i].length = IPOIB_BUF_SIZE;
- priv->sglist_draft[i].lkey = priv->mr->lkey;
-
- priv->rx_wr_draft[i].sg_list = &priv->sglist_draft[i];
- priv->rx_wr_draft[i].num_sge = 1;
- if (i < UD_POST_RCV_COUNT - 1)
- priv->rx_wr_draft[i].next = &priv->rx_wr_draft[i + 1];
+ priv->sglist_draft[i][0].lkey = priv->mr->lkey;
+ priv->sglist_draft[i][1].lkey = priv->mr->lkey;
+ priv->rx_wr_draft[i].sg_list = &priv->sglist_draft[i][0];
+ if (i < UD_POST_RCV_COUNT - 1)
+ priv->rx_wr_draft[i].next = &priv->rx_wr_draft[i + 1];
}
priv->rx_wr_draft[i].next = NULL;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
+ priv->sglist_draft[i][0].length = IPOIB_UD_HEAD_SIZE;
+ priv->sglist_draft[i][1].length = PAGE_SIZE;
+ priv->rx_wr_draft[i].num_sge = IPOIB_UD_RX_SG;
+ }
+ } else {
+ for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
+ priv->sglist_draft[i][0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr_draft[i].num_sge = 1;
+ }
+ }
+
return 0;
out_free_scq:
-------------- next part --------------
A non-text attachment was scrubbed...
Name: ipoib-4kmtu-sg-2.6.24-rc3.patch
Type: text/x-patch
Size: 16759 bytes
Desc: not available
URL: <http://lists.openfabrics.org/pipermail/ewg/attachments/20080204/7a08b488/attachment.bin>