[ofa-general] Fw: [Final][PATCH] IPoIB-4K MTU patch
Shirley Ma
xma at us.ibm.com
Wed Feb 6 10:33:03 PST 2008
Hello Eli,
FYI, in case you didn't receive these emails in time. You are welcome
to create a patch on top of this one, for example using __skb_put() to
replace the open-coded "+= size" accounting in ipoib_ud_skb_put_frags().
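For reference, a minimal sketch of that suggested cleanup. The helper name
ipoib_ud_put_head() is hypothetical, the snippet is untested, and it assumes
the usual kernel context (<linux/skbuff.h> for __skb_put()) plus the
IPOIB_UD_HEAD_SIZE constant introduced by the patch below:

	/* Hypothetical helper (sketch only): the head-portion accounting
	 * from ipoib_ud_skb_put_frags(), rewritten with __skb_put(), which
	 * advances skb->tail and skb->len together.
	 */
	static void ipoib_ud_put_head(struct sk_buff *skb, unsigned int *length)
	{
		unsigned int size = min(*length, (unsigned int)IPOIB_UD_HEAD_SIZE);

		__skb_put(skb, size);	/* replaces: skb->tail += size; skb->len += size; */
		*length -= size;
	}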
-------------------------------------------------------------------------------------------
Nam and Stefan have helped out with the backporting while I
concentrate on stress testing against the 2.6.24 kernel (20 duplex streams
over one port against mthca at 2K MTU; it has been running for over 8
hours). We have validated this patch (build, sniff test, flood
ping) on the 2.6.16 - 2.6.24 kernels, RHEL4.5, RHEL4.6, RHEL5.1, SLES10 SP1,
and the derivative version of SLES10 SP1.
The attachment below is the backport patch; I am reattaching the patch
file here for your convenience. The backport patch file
ipoib_0100_to_2.6.21.patch needs to be copied into the following directories:
./kernel_patches/attic/backport/2.6.9_U2/ipoib_0100_to_2.6.21.patch
./kernel_patches/attic/backport/2.6.9_U3/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.18-EL5.1/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.16_sles10/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.9_U4/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.9_U5/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.9_U6/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.18_suse10_2/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.16_sles10_sp1/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.11/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.12/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.13/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.14/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.15/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.16/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.17/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.18/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.19/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.20/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.21/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.13_suse10_0_u/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.15_ubuntu606/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.11_FC4/ipoib_0100_to_2.6.21.patch
./kernel_patches/backport/2.6.18_FC6/ipoib_0100_to_2.6.21.patch
Shirley
----- Forwarded by Shirley Ma/Beaverton/IBM on 02/06/08 12:28 AM -----
From:    Shirley Ma/Beaverton/IBM
Date:    02/05/08 01:33 PM
To:      tziporet at dev.mellanox.co.il, "Vladimir Sokolovsky (Mellanox)" <vlad at lists.openfabrics.org>
Cc:      eli at mellanox.co.il
Subject: [Final][PATCH] IPoIB-4K MTU patch (Document link: Shirley Ma)
Hello, below is the final patch, based on Eli's review comments. Thanks,
Eli, for all of your work.
This patch has been validated on the 2.6.24 kernel and SLES10, on both
intel/mthca and ppc/mthca. I am working on RHEL5 testing. The backport
patch will be provided tonight; hopefully Nam can help me with this. I
will keep the stress test running on different subnets. I hope nothing
has changed in the ofed-1.3 git tree today, so the patch can be applied
cleanly; if not, let me know. Please use the attachment to apply the
patch, since my Notes mail client has problems with inline patches.
Thanks
Shirley
------------
This patch enables IPoIB 4K MTU support. When PAGE_SIZE is greater than
the IB MTU size + GRH + IPoIB header, there is no need for RX S/G. When it
is smaller, two buffers are allocated: one for the GRH + IPoIB header and
one for the IPoIB payload.
Signed-off-by: Shirley Ma <xma at us.ibm.com>
---
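(Illustration only, not part of the patch: a small user-space sketch of the
buffer-split decision above, assuming 4K pages and the constants the patch
uses, IB_GRH_BYTES = 40 and IPOIB_ENCAP_LEN = 4.)

	#include <stdio.h>

	#define PAGE_SIZE        4096
	#define IB_GRH_BYTES     40
	#define IPOIB_ENCAP_LEN  4
	#define IPOIB_UD_MTU(ib_mtu)      ((ib_mtu) - IPOIB_ENCAP_LEN)
	#define IPOIB_UD_BUF_SIZE(ib_mtu) ((ib_mtu) + IB_GRH_BYTES)

	int main(void)
	{
		int mtus[] = { 2048, 4096 };
		int i;

		for (i = 0; i < 2; i++) {
			/* S/G is needed when GRH + payload no longer fits in
			 * one page: 2048 + 40 = 2088 fits, so one buffer;
			 * 4096 + 40 = 4136 does not, so a 44-byte head buffer
			 * (GRH + IPoIB header) plus one page for the payload.
			 */
			int need_sg = IPOIB_UD_BUF_SIZE(mtus[i]) > PAGE_SIZE;

			printf("ib_mtu %d: rx buf %d bytes, ipoib mtu %d, need_sg %d\n",
			       mtus[i], IPOIB_UD_BUF_SIZE(mtus[i]),
			       IPOIB_UD_MTU(mtus[i]), need_sg);
		}
		return 0;
	}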
diff -urpN ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib.h ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib.h
--- ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib.h 2008-02-04 20:09:18.000000000 -0800
+++ ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib.h 2008-02-05 12:20:46.000000000 -0800
@@ -56,11 +56,11 @@
/* constants */
enum {
- IPOIB_PACKET_SIZE = 2048,
- IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
IPOIB_ENCAP_LEN = 4,
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* for 4K MTU */
+
IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -141,9 +141,9 @@ struct ipoib_mcast {
struct net_device *dev;
};
-struct ipoib_rx_buf {
+struct ipoib_sg_rx_buf {
struct sk_buff *skb;
- u64 mapping;
+ u64 mapping[IPOIB_UD_RX_SG];
};
struct ipoib_tx_buf {
@@ -337,7 +337,7 @@ struct ipoib_dev_priv {
struct net_device *dev;
struct ib_recv_wr rx_wr_draft[UD_POST_RCV_COUNT];
- struct ib_sge sglist_draft[UD_POST_RCV_COUNT];
+ struct ib_sge sglist_draft[UD_POST_RCV_COUNT][IPOIB_UD_RX_SG];
unsigned int rx_outst;
struct napi_struct napi;
@@ -378,7 +378,7 @@ struct ipoib_dev_priv {
unsigned int admin_mtu;
unsigned int mcast_mtu;
- struct ipoib_rx_buf *rx_ring;
+ struct ipoib_sg_rx_buf *rx_ring;
spinlock_t tx_lock;
struct ipoib_tx_buf *tx_ring;
@@ -412,6 +412,7 @@ struct ipoib_dev_priv {
struct ipoib_ethtool_st etool;
struct timer_list poll_timer;
struct ib_ah *own_ah;
+ int max_ib_mtu;
};
struct ipoib_ah {
@@ -452,6 +453,22 @@ struct ipoib_neigh {
struct list_head list;
};
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+static inline int ipoib_ud_need_sg(int ib_mtu)
+{
+ return (IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE) ? 1 : 0;
+}
+static inline void ipoib_sg_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE);
+ } else
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE);
+}
+
/*
* We stash a pointer to our private neighbour information after our
* hardware address in neigh->ha. The ALIGN() expression here makes
diff -urpN ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_ib.c 2008-02-04 20:09:18.000000000 -0800
+++ ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_ib.c 2008-02-05 12:20:40.000000000 -0800
@@ -96,14 +96,37 @@ static void clean_pending_receives(struc
for (i = 0; i < priv->rx_outst; ++i) {
id = priv->rx_wr_draft[i].wr_id & ~IPOIB_OP_RECV;
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
priv->rx_outst = 0;
}
+static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, struct sk_buff *skb,
+ unsigned int length)
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ unsigned int size;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ /* put header into skb */
+ size = min(length, (unsigned)IPOIB_UD_HEAD_SIZE);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
+
+ size = min(length, (unsigned) PAGE_SIZE);
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ } else
+ skb_put(skb, length);
+}
+
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -111,8 +134,11 @@ static int ipoib_ib_post_receive(struct
int ret = 0;
int i = priv->rx_outst;
- priv->sglist_draft[i].addr = priv->rx_ring[id].mapping;
+ priv->sglist_draft[i][0].addr = priv->rx_ring[id].mapping[0];
+ priv->sglist_draft[i][1].addr = priv->rx_ring[id].mapping[1];
+
priv->rx_wr_draft[i].wr_id = id | IPOIB_OP_RECV;
+
if (++priv->rx_outst == UD_POST_RCV_COUNT) {
ret = ib_post_recv(priv->qp, priv->rx_wr_draft, &bad_wr);
@@ -120,8 +146,8 @@ static int ipoib_ib_post_receive(struct
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
while (bad_wr) {
id = bad_wr->wr_id & ~IPOIB_OP_RECV;
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -132,16 +158,23 @@ static int ipoib_ib_post_receive(struct
return ret;
}
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id,
+ u64 mapping[IPOIB_UD_RX_SG])
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- u64 addr;
+ int buf_size;
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
- if (!skb)
- return -ENOMEM;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ buf_size = IPOIB_UD_HEAD_SIZE;
+ else
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ skb = dev_alloc_skb(buf_size + 4);
+
+ if (unlikely(!skb))
+ return NULL;
+
/*
* IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
* header. So we need 4 more bytes to get to 48 and align the
@@ -149,17 +182,32 @@ static int ipoib_alloc_rx_skb(struct net
*/
skb_reserve(skb, 4);
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
- dev_kfree_skb_any(skb);
- return -EIO;
- }
-
- priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
-
- return 0;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto partial_error;
+
+ skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+ mapping[1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+ goto partial_error;
+ }
+
+ priv->rx_ring[id].skb = skb;
+ return skb;
+
+partial_error:
+ ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
@@ -168,7 +216,7 @@ static int ipoib_ib_post_receives(struct
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ if (!ipoib_alloc_rx_skb(dev, i, priv->rx_ring[i].mapping)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -186,7 +234,7 @@ static void ipoib_ib_handle_rx_wc(struct
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- u64 addr;
+ u64 mapping[IPOIB_UD_RX_SG];
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -198,42 +246,38 @@ static void ipoib_ib_handle_rx_wc(struct
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
+ /* duplicate the unmap code here to keep the need-sg check out of the fast path */
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
}
-
/*
* Drop packets that this interface sent, ie multicast packets
* that the HCA has replicated.
*/
- if (unlikely(wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num))
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
goto repost;
-
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id, mapping))) {
++dev->stats.rx_dropped;
goto repost;
}
-
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
-
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-
- skb_put(skb, wc->byte_len);
+ ipoib_sg_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
+ ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
+ memcpy(priv->rx_ring[wr_id].mapping, mapping,
+ IPOIB_UD_RX_SG * sizeof *mapping);
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -827,18 +871,15 @@ int ipoib_ib_dev_stop(struct net_device
* all our pending work requests.
*/
for (i = 0; i < ipoib_recvq_size; ++i) {
- struct ipoib_rx_buf *rx_req;
+ struct ipoib_sg_rx_buf *rx_req;
rx_req = &priv->rx_ring[i];
-
- if (rx_req->skb) {
- ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_req->skb);
- rx_req->skb = NULL;
- }
+ if (!rx_req->skb)
+ continue;
+ ipoib_sg_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
}
goto timeout;
diff -urpN ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_main.c ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_main.c
--- ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_main.c 2008-02-04 20:09:18.000000000 -0800
+++ ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_main.c 2008-02-05 12:20:40.000000000 -0800
@@ -193,7 +193,7 @@ static int ipoib_change_mtu(struct net_d
return 0;
}
- if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+ if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -1007,10 +1007,6 @@ static void ipoib_setup(struct net_devic
dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
- /* MTU will be reset when mcast join happens */
- dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
- priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
@@ -1156,6 +1152,7 @@ static struct net_device *ipoib_add_port
struct ib_device *hca, u8 port)
{
struct ipoib_dev_priv *priv;
+ struct ib_port_attr attr;
int result = -ENOMEM;
priv = ipoib_intf_alloc(format);
@@ -1166,6 +1163,18 @@ static struct net_device *ipoib_add_port
priv->dev->features |= NETIF_F_HIGHDMA;
+ if (!ib_query_port(hca, port, &attr))
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ else {
+ printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+ hca->name, port);
+ goto device_init_failed;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff -urpN ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
--- ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2008-02-04 15:31:14.000000000 -0800
+++ ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2008-02-05 12:20:40.000000000 -0800
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_s
return;
}
- priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
- IPOIB_ENCAP_LEN;
+ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
if (!ipoib_cm_admin_enabled(dev))
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff -urpN ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
--- ofed_kernel_a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c 2008-02-04 20:09:18.000000000 -0800
+++ ofed_kernel_b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c 2008-02-05 12:20:40.000000000 -0800
@@ -151,7 +151,7 @@ int ipoib_transport_dev_init(struct net_
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = dev->features & NETIF_F_SG ? MAX_SKB_FRAGS + 1 : 1,
- .max_recv_sge = 1
+ .max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_UD,
@@ -225,18 +225,29 @@ int ipoib_transport_dev_init(struct net_
priv->tx_wr.opcode = IB_WR_SEND;
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
-
+
for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
- priv->sglist_draft[i].length = IPOIB_BUF_SIZE;
- priv->sglist_draft[i].lkey = priv->mr->lkey;
-
- priv->rx_wr_draft[i].sg_list = &priv->sglist_draft[i];
- priv->rx_wr_draft[i].num_sge = 1;
+ priv->sglist_draft[i][0].lkey = priv->mr->lkey;
+ priv->sglist_draft[i][1].lkey = priv->mr->lkey;
+ priv->rx_wr_draft[i].sg_list = &priv->sglist_draft[i][0];
if (i < UD_POST_RCV_COUNT - 1)
priv->rx_wr_draft[i].next = &priv->rx_wr_draft[i + 1];
}
priv->rx_wr_draft[i].next = NULL;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
+ priv->sglist_draft[i][0].length = IPOIB_UD_HEAD_SIZE;
+ priv->sglist_draft[i][1].length = PAGE_SIZE;
+ priv->rx_wr_draft[i].num_sge = IPOIB_UD_RX_SG;
+ }
+ } else {
+ for (i = 0; i < UD_POST_RCV_COUNT; ++i) {
+ priv->sglist_draft[i][0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr_draft[i].num_sge = 1;
+ }
+ }
+
return 0;
out_free_scq:
(See attached file: ipoib-new-4kmtu.patch)
Shirley
-------------- next part --------------
A non-text attachment was scrubbed...
Name: ipoib-new-4kmtu.patch
Type: application/octet-stream
Size: 14679 bytes
Desc: not available
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20080206/1210838d/attachment.obj>