[ofa-general] [PATCH] sdp: fix bad credits advertised when connection initiated
Amir Vadai
amirv at mellanox.co.il
Wed Jul 1 09:44:59 PDT 2009
Advertise the number of receive buffers actually posted (rx_head - rx_tail,
taken after sdp_init_buffers()) in the Hello/Hello-Ack headers instead of
the stale remote_credits value. This also enables removing the ugly credit
post right after connection establishment.
Signed-off-by: Amir Vadai <amirv at mellanox.co.il>
---
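Note for reviewers (not part of the commit message): the sketch below
summarizes the ordering this patch establishes on the active side. It is
illustrative only and not code from this patch; the helper name
sdp_fill_hello_credits and its exact signature are hypothetical, while
sdp_init_buffers(), bsdh.bufs and the rx_head/rx_tail ring counters are
taken from the hunks that follow.

/*
 * Illustrative sketch, hypothetical helper: post the initial receive
 * buffers first, then advertise exactly what was posted in the Hello
 * BSDH rather than the not-yet-meaningful remote_credits value.
 */
static void sdp_fill_hello_credits(struct sdp_sock *ssk, struct sdp_hh *hh)
{
	/* Post the initial receive ring before advertising anything. */
	sdp_init_buffers(ssk, rcvbuf_initial_size);

	/* Advertise the posted ring occupancy as the initial credit count. */
	hh->bsdh.bufs = htons(ssk->rx_head - ssk->rx_tail);
}

The passive side does the same when filling the Hello-Ack header of the
child socket, which is why sdp_post_credits() and the extra credit post
after rdma_accept() can be removed.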
drivers/infiniband/ulp/sdp/sdp.h | 1 -
drivers/infiniband/ulp/sdp/sdp_bcopy.c | 15 ----
drivers/infiniband/ulp/sdp/sdp_cma.c | 8 +--
.../2.6.16/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.17/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.18/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.19/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.20/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.21/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.22/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.23/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.24/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.9_U4/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.9_U5/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.9_U6/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
.../2.6.9_U7/sdp_0090_revert_to_2_6_24.patch | 77 +++++++++-----------
25 files changed, 751 insertions(+), 967 deletions(-)
diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index f9c2421..7f197d2 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -334,7 +334,6 @@ void sdp_reset(struct sock *sk);
void sdp_reset_sk(struct sock *sk, int rc);
void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
void sdp_work(struct work_struct *work);
-int sdp_post_credits(struct sdp_sock *ssk);
void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
void sdp_post_recvs(struct sdp_sock *ssk);
int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq);
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 475d7c3..a090868 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -488,21 +488,6 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
}
-int sdp_post_credits(struct sdp_sock *ssk)
-{
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- sdp_post_send(ssk, skb, SDP_MID_DATA);
- }
- return 0;
-}
-
void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
{
/* TODO: nonagle? */
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index 96d65bd..aba9113 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -412,11 +412,11 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
sdp_sk(sk)->rx_tail;
memset(&hh, 0, sizeof hh);
hh.bsdh.mid = SDP_MID_HELLO;
- hh.bsdh.bufs = htons(sdp_sk(sk)->remote_credits);
hh.bsdh.len = htonl(sizeof(struct sdp_bsdh) + SDP_HH_SIZE);
hh.max_adverts = 1;
hh.majv_minv = SDP_MAJV_MINV;
sdp_init_buffers(sdp_sk(sk), rcvbuf_initial_size);
+ hh.bsdh.bufs = htons(sdp_sk(sk)->rx_head - sdp_sk(sk)->rx_tail);
hh.localrcvsz = hh.desremrcvsz = htonl(sdp_sk(sk)->recv_frags *
PAGE_SIZE + SDP_HEAD_SIZE);
hh.max_adverts = 0x1;
@@ -446,7 +446,8 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
sdp_sk(child)->rx_tail;
memset(&hah, 0, sizeof hah);
hah.bsdh.mid = SDP_MID_HELLO_ACK;
- hah.bsdh.bufs = htons(sdp_sk(child)->remote_credits);
+ hah.bsdh.bufs =
+ htons(sdp_sk(child)->rx_head - sdp_sk(child)->rx_tail);
hah.bsdh.len = htonl(sizeof(struct sdp_bsdh) + SDP_HAH_SIZE);
hah.majv_minv = SDP_MAJV_MINV;
hah.ext_max_adverts = 1; /* Doesn't seem to be mandated by spec,
@@ -473,9 +474,6 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
rdma_reject(id, NULL, 0);
else
rc = rdma_accept(id, NULL);
-
- if (!rc)
- rc = sdp_post_credits(sdp_sk(sk));
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
sdp_dbg(sk, "RDMA_CM_EVENT_CONNECT_ERROR\n");
diff --git a/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18-EL5.3/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
diff --git a/kernel_patches/backport/2.6.9_U7/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U7/sdp_0090_revert_to_2_6_24.patch
index 893db9b..e0d6686 100644
--- a/kernel_patches/backport/2.6.9_U7/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U7/sdp_0090_revert_to_2_6_24.patch
@@ -5,13 +5,13 @@
drivers/infiniband/ulp/sdp/sdp_main.c | 19 +++++++++----------
4 files changed, 21 insertions(+), 50 deletions(-)
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp.h
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-@@ -317,30 +317,4 @@
- void sdp_start_keepalive_timer(struct sock *sk);
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp.h
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp.h
+@@ -354,30 +354,4 @@ void sdp_start_keepalive_timer(struct so
void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ int sdp_init_sock(struct sock *sk);
-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
-{
@@ -40,11 +40,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp.h
-
-
#endif
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -139,7 +139,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+@@ -147,7 +147,7 @@ static void sdp_fin(struct sock *sk)
}
@@ -53,7 +53,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
-@@ -190,7 +190,7 @@
+@@ -198,7 +198,7 @@ void sdp_post_send(struct sdp_sock *ssk,
struct ib_send_wr *bad_wr;
h->mid = mid;
@@ -62,7 +62,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
else
h->flags = 0;
-@@ -234,7 +234,7 @@
+@@ -242,7 +242,7 @@ void sdp_post_send(struct sdp_sock *ssk,
ssk->tx_wr.num_sge = frags + 1;
ssk->tx_wr.opcode = IB_WR_SEND;
ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -71,7 +71,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
++ssk->tx_head;
-@@ -304,11 +304,11 @@
+@@ -312,11 +312,11 @@ static void sdp_post_recv(struct sdp_soc
/* TODO: allocate from cache */
if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -85,16 +85,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
GFP_KERNEL);
gfp_page = GFP_HIGHUSER;
}
-@@ -476,7 +476,7 @@
- if (likely(ssk->bufs > 1) &&
- likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
- struct sk_buff *skb;
-- skb = sdp_stream_alloc_skb(&ssk->isk.sk,
-+ skb = sk_stream_alloc_skb(&ssk->isk.sk,
- sizeof(struct sdp_bsdh),
- GFP_KERNEL);
- if (!skb)
-@@ -514,7 +514,7 @@
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *resp_size;
ssk->recv_request = 0;
@@ -103,7 +94,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*resp_size),
gfp_page);
-@@ -539,7 +539,7 @@
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
struct sdp_chrecvbuf *req_size;
@@ -112,7 +103,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh) +
sizeof(*req_size),
gfp_page);
-@@ -561,7 +561,7 @@
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
likely((1 << ssk->isk.sk.sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
@@ -121,7 +112,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
GFP_KERNEL);
/* FIXME */
-@@ -573,7 +573,7 @@
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
!ssk->isk.sk.sk_send_head &&
ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
ssk->sdp_disconnect = 0;
@@ -130,7 +121,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
sizeof(struct sdp_bsdh),
gfp_page);
/* FIXME */
-@@ -778,7 +778,7 @@
+@@ -789,7 +789,7 @@ static int sdp_handle_send_comp(struct s
}
out:
@@ -139,7 +130,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
return 0;
}
-@@ -864,7 +864,7 @@
+@@ -875,7 +875,7 @@ void sdp_work(struct work_struct *work)
sdp_poll_cq(ssk, cq);
release_sock(sk);
@@ -148,11 +139,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_bcopy.c
lock_sock(sk);
cq = ssk->cq;
if (unlikely(!cq))
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -161,8 +161,6 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_cma.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_cma.c
+@@ -161,8 +161,6 @@ static int sdp_init_qp(struct sock *sk,
goto err_cq;
}
@@ -161,11 +152,11 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_cma.c
qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
rc = rdma_create_qp(id, pd, &qp_init_attr);
-Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
---- ofed_1_4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -509,7 +509,7 @@
+--- ofed_kernel-2.6.16.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.16/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -521,7 +521,7 @@ static void sdp_close(struct sock *sk, l
__kfree_skb(skb);
}
@@ -174,7 +165,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
-@@ -1200,7 +1200,7 @@
+@@ -1243,7 +1243,7 @@ static inline void sdp_mark_urg(struct s
{
if (unlikely(flags & MSG_OOB)) {
struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -183,7 +174,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
}
}
-@@ -1217,8 +1217,7 @@
+@@ -1260,8 +1260,7 @@ static inline void skb_entail(struct soc
{
skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
@@ -193,7 +184,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!sk->sk_send_head)
sk->sk_send_head = skb;
if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1382,7 +1381,7 @@
+@@ -1432,7 +1431,7 @@ static inline int sdp_bcopy_get(struct s
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
@@ -202,7 +193,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
return SDP_DO_WAIT_MEM;
if (!page) {
-@@ -1454,7 +1453,7 @@
+@@ -1504,7 +1503,7 @@ static inline int sdp_bzcopy_get(struct
if (left <= this_page)
this_page = left;
@@ -210,8 +201,8 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
+ if (!sk_stream_wmem_schedule(sk, copy))
return SDP_DO_WAIT_MEM;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1662,8 +1661,8 @@
+ get_page(bz->pages[bz->cur_page]);
+@@ -1720,8 +1719,8 @@ new_segment:
goto wait_for_sndbuf;
}
@@ -222,7 +213,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
if (!skb)
goto wait_for_memory;
-@@ -1687,7 +1686,7 @@
+@@ -1745,7 +1744,7 @@ new_segment:
/* OOB data byte should be the last byte of
the data payload */
@@ -231,7 +222,7 @@ Index: ofed_1_4/drivers/infiniband/ulp/sdp/sdp_main.c
!(flags & MSG_OOB)) {
sdp_mark_push(ssk, skb);
goto new_segment;
-@@ -1763,7 +1762,7 @@
+@@ -1821,7 +1820,7 @@ do_fault:
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
__skb_unlink(skb, &sk->sk_write_queue);
--
1.5.3.7