[ofa-general] [PATCH v2 5/6] Update backport patches after the SDP disconnect fix

Amir Vadai <amirv at mellanox.co.il>
Thu Jul 10 00:48:52 PDT 2008


Signed-off-by: Amir Vadai <amirv at mellanox.co.il>
---
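Notes:

The sdp_0090_revert_to_2_6_24.patch files adapt SDP to kernels that
predate the 2.6.25 per-socket memory accounting API by mapping the
newer calls back onto the older sk_stream_* helpers, as the hunks
below show (sdp_stream_alloc_skb -> sk_stream_alloc_skb,
sk_wmem_free_skb -> sk_stream_free_skb, sk_mem_reclaim ->
sk_stream_mem_reclaim). As a rough sketch, the mapping amounts to the
following hypothetical compat macros; this is illustration only, since
the patches rewrite the call sites directly rather than use a header
like this:

	/* Illustration only; assumed pre-2.6.25 equivalents. */
	#define sk_mem_reclaim(sk)          sk_stream_mem_reclaim(sk)
	#define sk_wmem_free_skb(sk, skb)   sk_stream_free_skb((sk), (skb))
	/*
	 * In skb_entail(), the open-coded accounting
	 *	sk->sk_wmem_queued += skb->truesize;
	 *	sk_mem_charge(sk, skb->truesize);
	 * collapses back into the single older helper
	 *	sk_charge_skb(sk, skb);
	 */

These backport patches are regenerated because the SDP disconnect fix
earlier in this series shifted their hunk offsets and context (note
the new ssk->sdp_disconnect lines and the sdp_handle_send_comp() hunk
that replaces the old sdp_handle_wc() one). The inner patches also
move from git-style headers to timestamped "diff -Naup" headers.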
 .../2.6.16/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch  |  162 ++++++++++----------
 .../sdp_0090_revert_to_2_6_24.patch                |  162 ++++++++++----------
 .../sdp_0090_revert_to_2_6_24.patch                |  162 ++++++++++----------
 .../2.6.17/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch   |  162 ++++++++++----------
 .../2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch   |  162 ++++++++++----------
 .../2.6.18/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch     |  162 ++++++++++----------
 .../sdp_0090_revert_to_2_6_24.patch                |  162 ++++++++++----------
 .../2.6.19/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.20/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.21/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.22/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../sdp_0090_revert_to_2_6_24.patch                |  162 ++++++++++----------
 .../2.6.23/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.24/sdp_0090_revert_to_2_6_24.patch         |  162 ++++++++++----------
 .../2.6.9_U4/sdp_0090_revert_to_2_6_24.patch       |  162 ++++++++++----------
 .../backport/2.6.9_U4/sdp_7277_to_2_6_11.patch     |   40 +++---
 .../2.6.9_U5/sdp_0090_revert_to_2_6_24.patch       |  162 ++++++++++----------
 .../backport/2.6.9_U5/sdp_7277_to_2_6_11.patch     |   40 +++---
 .../2.6.9_U6/sdp_0090_revert_to_2_6_24.patch       |  162 ++++++++++----------
 .../backport/2.6.9_U6/sdp_7277_to_2_6_11.patch     |   40 +++---
 23 files changed, 1640 insertions(+), 1720 deletions(-)

diff --git a/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.17/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18-EL5.1/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18-EL5.2/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18_FC6/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.18_suse10_2/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
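
[ Reviewer's note, illustrative only and not part of the patch: the
  hunks above all make the same substitution, mapping the 2.6.24
  socket memory-accounting API back onto its pre-2.6.24 equivalents.
  The helper bodies below are reconstructed from memory of
  include/net/sock.h of that era and vary slightly between kernel
  versions, so treat them as an approximation; the point is that the
  old helpers bundled the accounting that 2.6.24 split into explicit
  sk_wmem_queued arithmetic plus sk_mem_charge()/sk_mem_uncharge():

	static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
	{
		/* pre-2.6.24 stand-in for:
		 *	sk->sk_wmem_queued += skb->truesize;
		 *	sk_mem_charge(sk, skb->truesize);
		 */
		sk->sk_wmem_queued += skb->truesize;
		sk->sk_forward_alloc -= skb->truesize;
	}

	static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
	{
		/* pre-2.6.24 stand-in for sk_wmem_free_skb() */
		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
		sk->sk_wmem_queued -= skb->truesize;
		sk->sk_forward_alloc += skb->truesize;
		__kfree_skb(skb);
	}
]
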
diff --git a/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.19/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.20/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.21/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.22/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.22_suse10_3/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
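
[Note: the hunks above are repeated verbatim for every backport directory that follows; the explanation below applies to each copy. The refreshed sdp_0090_revert_to_2_6_24.patch undoes the 2.6.24 per-socket memory-accounting rework for older kernels: the generic sk_wmem_free_skb()/sk_mem_charge()/sk_mem_reclaim() calls are mapped back onto the pre-2.6.24 sk_stream_free_skb()/sk_charge_skb()/sk_stream_mem_reclaim() stream API, and the local sdp_stream_alloc_skb() helper carried in sdp.h is dropped in favor of the sk_stream_alloc_skb() that older kernels still provide. A minimal compat shim sketching that mapping is shown below; the sdp_compat_* names are hypothetical — OFED performs the substitution through the patch text itself rather than through a header like this:

	#include <linux/version.h>
	#include <net/sock.h>

	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	/* pre-2.6.24 stream accounting: sk_charge_skb() bumps
	 * sk_wmem_queued and charges forward-alloc in one call */
	#define sdp_compat_entail_charge(sk, skb)  sk_charge_skb(sk, skb)
	#define sdp_compat_free_skb(sk, skb)       sk_stream_free_skb(sk, skb)
	#define sdp_compat_mem_reclaim(sk)         sk_stream_mem_reclaim(sk)
	#else
	/* 2.6.24+: the accounting rework split charging into an
	 * explicit sk_wmem_queued update plus sk_mem_charge() */
	#define sdp_compat_entail_charge(sk, skb)                \
		do {                                             \
			(sk)->sk_wmem_queued += (skb)->truesize; \
			sk_mem_charge(sk, (skb)->truesize);      \
		} while (0)
	#define sdp_compat_free_skb(sk, skb)       sk_wmem_free_skb(sk, skb)
	#define sdp_compat_mem_reclaim(sk)         sk_mem_reclaim(sk)
	#endif

The skb_entail() hunk shows exactly this pairing: the two-line sk_wmem_queued/sk_mem_charge() sequence on new kernels collapses into the single sk_charge_skb() call on old ones.]
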
diff --git a/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.23/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.24/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U4/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch b/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch
index 789362b..a9d65a7 100644
--- a/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch
+++ b/kernel_patches/backport/2.6.9_U4/sdp_7277_to_2_6_11.patch
@@ -1,32 +1,32 @@
-Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
 ===================================================================
---- openib_gen2-20060517-1739_check.orig/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 17:45:32.000000000 +0300
-+++ openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 18:33:13.000000000 +0300
-@@ -300,7 +300,6 @@
+--- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -564,7 +564,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
 -		atomic_inc(sk->sk_prot->orphan_count);
  		queue_delayed_work(sdp_workqueue, &sdp_sk(sk)->time_wait_work,
  				   TCP_FIN_TIMEOUT);
- 		goto out;
-@@ -495,7 +494,6 @@
- 	sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
- 
- 	cancel_delayed_work(&sdp_sk(sk)->time_wait_work);
--	atomic_dec(sk->sk_prot->orphan_count);
- 
- 	sock_put(sk);
+ 	}
+@@ -836,7 +835,6 @@ void sdp_cancel_fin_wait_timeout(struct 
+ {
+ 	ssk->fin_wait_timeout = 0;
+ 	cancel_delayed_work(&ssk->time_wait_work);
+-	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
-@@ -517,7 +515,6 @@
- 	sdp_sk(sk)->time_wait = 0;
+ 
+ void sdp_destroy_work(struct work_struct *work)
+@@ -870,7 +868,6 @@ void sdp_time_wait_work(struct work_stru
+ 	sdp_sk(sk)->fin_wait_timeout = 0;
  	release_sock(sk);
  
 -	atomic_dec(sk->sk_prot->orphan_count);
- 	sock_put(sk);
- }
  
-@@ -1166,7 +1163,6 @@
+ 	if (sdp_sk(sk)->id)
+ 		rdma_disconnect(sdp_sk(sk)->id);
+@@ -2101,7 +2098,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -34,7 +34,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -1187,13 +1183,11 @@
+@@ -2122,13 +2118,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -50,7 +50,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  	.name	     = "SDP",
  };
  
-@@ -1298,9 +1292,6 @@
+@@ -2490,9 +2484,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  
@@ -59,4 +59,4 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
 -		       atomic_read(&orphan_count));
  	destroy_workqueue(sdp_workqueue);
  	flush_scheduled_work();
- }
+ 
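
[Note: the sdp_7277_to_2_6_11.patch refresh above (repeated for each 2.6.9_U* directory) serves the RHEL4-era kernels, where struct proto apparently has no orphan_count member, so all orphan bookkeeping is stripped; the updated context merely follows that bookkeeping to the functions it moved into after the sdp disconnect fix (sdp_cancel_fin_wait_timeout(), sdp_time_wait_work()). A guarded equivalent, assuming a hypothetical HAVE_PROTO_ORPHAN_COUNT feature test that OFED does not actually define, would look like:

	/* sketch only: drop orphan bookkeeping on kernels whose
	 * struct proto lacks an orphan_count member */
	#ifdef HAVE_PROTO_ORPHAN_COUNT
		atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
	#endif

Maintaining the removal as a per-backport patch keeps the mainline SDP sources free of version ifdefs, at the cost of refreshing these hunks whenever the surrounding code moves, as done here.]
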
diff --git a/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U5/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch b/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch
index 789362b..a9d65a7 100644
--- a/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch
+++ b/kernel_patches/backport/2.6.9_U5/sdp_7277_to_2_6_11.patch
@@ -1,32 +1,32 @@
-Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
 ===================================================================
---- openib_gen2-20060517-1739_check.orig/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 17:45:32.000000000 +0300
-+++ openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 18:33:13.000000000 +0300
-@@ -300,7 +300,6 @@
+--- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -564,7 +564,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
 -		atomic_inc(sk->sk_prot->orphan_count);
  		queue_delayed_work(sdp_workqueue, &sdp_sk(sk)->time_wait_work,
  				   TCP_FIN_TIMEOUT);
- 		goto out;
-@@ -495,7 +494,6 @@
- 	sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
- 
- 	cancel_delayed_work(&sdp_sk(sk)->time_wait_work);
--	atomic_dec(sk->sk_prot->orphan_count);
- 
- 	sock_put(sk);
+ 	}
+@@ -836,7 +835,6 @@ void sdp_cancel_fin_wait_timeout(struct 
+ {
+ 	ssk->fin_wait_timeout = 0;
+ 	cancel_delayed_work(&ssk->time_wait_work);
+-	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
-@@ -517,7 +515,6 @@
- 	sdp_sk(sk)->time_wait = 0;
+ 
+ void sdp_destroy_work(struct work_struct *work)
+@@ -870,7 +868,6 @@ void sdp_time_wait_work(struct work_stru
+ 	sdp_sk(sk)->fin_wait_timeout = 0;
  	release_sock(sk);
  
 -	atomic_dec(sk->sk_prot->orphan_count);
- 	sock_put(sk);
- }
  
-@@ -1166,7 +1163,6 @@
+ 	if (sdp_sk(sk)->id)
+ 		rdma_disconnect(sdp_sk(sk)->id);
+@@ -2101,7 +2098,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -34,7 +34,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -1187,13 +1183,11 @@
+@@ -2122,13 +2118,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -50,7 +50,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  	.name	     = "SDP",
  };
  
-@@ -1298,9 +1292,6 @@
+@@ -2490,9 +2484,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  
@@ -59,4 +59,4 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
 -		       atomic_read(&orphan_count));
  	destroy_workqueue(sdp_workqueue);
  	flush_scheduled_work();
- }
+ 
diff --git a/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch b/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
index 78550f8..8e4b929 100644
--- a/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
+++ b/kernel_patches/backport/2.6.9_U6/sdp_0090_revert_to_2_6_24.patch
@@ -1,44 +1,8 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
-index c434b60..e4d3eff 100644
---- a/drivers/infiniband/ulp/sdp/sdp.h
-+++ b/drivers/infiniband/ulp/sdp/sdp.h
-@@ -265,30 +265,4 @@ void sdp_post_keepalive(struct sdp_sock *ssk);
- void sdp_start_keepalive_timer(struct sock *sk);
- void sdp_bzcopy_write_space(struct sdp_sock *ssk);
- 
--static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
--{
--	struct sk_buff *skb;
--
--	/* The TCP header must be at least 32-bit aligned.  */
--	size = ALIGN(size, 4);
--
--	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
--	if (skb) {
--		if (sk_wmem_schedule(sk, skb->truesize)) {
--			/*
--			 * Make sure that we have exactly size bytes
--			 * available to the caller, no more, no less.
--			 */
--			skb_reserve(skb, skb_tailroom(skb) - size);
--			return skb;
--		}
--		__kfree_skb(skb);
--	} else {
--		sk->sk_prot->enter_memory_pressure();
--		sk_stream_moderate_sndbuf(sk);
--	}
--	return NULL;
--}
--
--
- #endif
-diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-index 36cbbad..ad788f7 100644
---- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
-@@ -105,7 +105,7 @@ static void sdp_fin(struct sock *sk)
- 	sock_set_flag(sk, SOCK_DONE);
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2008-06-22 15:02:31.000000000 +0300
+@@ -141,7 +141,7 @@ static void sdp_fin(struct sock *sk)
+ 	}
  
  
 -	sk_mem_reclaim(sk);
@@ -46,7 +10,7 @@ index 36cbbad..ad788f7 100644
  
  	if (!sock_flag(sk, SOCK_DEAD)) {
  		sk->sk_state_change(sk);
-@@ -156,7 +156,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -192,7 +192,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	struct ib_send_wr *bad_wr;
  
  	h->mid = mid;
@@ -55,7 +19,7 @@ index 36cbbad..ad788f7 100644
  		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
  	else
  		h->flags = 0;
-@@ -200,7 +200,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
+@@ -236,7 +236,7 @@ void sdp_post_send(struct sdp_sock *ssk,
  	ssk->tx_wr.num_sge = frags + 1;
  	ssk->tx_wr.opcode = IB_WR_SEND;
  	ssk->tx_wr.send_flags = IB_SEND_SIGNALED;
@@ -64,7 +28,7 @@ index 36cbbad..ad788f7 100644
  		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
  	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
  	++ssk->tx_head;
-@@ -270,11 +270,11 @@ static void sdp_post_recv(struct sdp_sock *ssk)
+@@ -306,11 +306,11 @@ static void sdp_post_recv(struct sdp_soc
  	/* TODO: allocate from cache */
  
  	if (unlikely(ssk->isk.sk.sk_allocation)) {
@@ -78,7 +42,7 @@ index 36cbbad..ad788f7 100644
  					  GFP_KERNEL);
  		gfp_page = GFP_HIGHUSER;
  	}
-@@ -442,7 +442,7 @@ int sdp_post_credits(struct sdp_sock *ssk)
+@@ -478,7 +478,7 @@ int sdp_post_credits(struct sdp_sock *ss
  	if (likely(ssk->bufs > 1) &&
  	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
  		struct sk_buff *skb;
@@ -87,7 +51,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		if (!skb)
-@@ -480,7 +480,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -516,7 +516,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *resp_size;
  		ssk->recv_request = 0;
@@ -96,7 +60,7 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*resp_size),
  					  gfp_page);
-@@ -505,7 +505,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
+@@ -541,7 +541,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  	    ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
  	    ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
  		struct sdp_chrecvbuf *req_size;
@@ -105,35 +69,35 @@ index 36cbbad..ad788f7 100644
  					  sizeof(struct sdp_bsdh) +
  					  sizeof(*req_size),
  					  gfp_page);
-@@ -525,7 +525,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 	if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
- 	    likely(ssk->bufs > 1) &&
- 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+@@ -563,7 +563,7 @@ void sdp_post_sends(struct sdp_sock *ssk
+ 	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+ 	    likely((1 << ssk->isk.sk.sk_state) &
+ 		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  GFP_KERNEL);
  		/* FIXME */
-@@ -537,7 +537,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
- 			(TCPF_FIN_WAIT1 | TCPF_LAST_ACK)) &&
+@@ -575,7 +575,7 @@ void sdp_post_sends(struct sdp_sock *ssk
  		!ssk->isk.sk.sk_send_head &&
  		ssk->bufs > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+ 		ssk->sdp_disconnect = 0;
 -		skb = sdp_stream_alloc_skb(&ssk->isk.sk,
 +		skb = sk_stream_alloc_skb(&ssk->isk.sk,
  					  sizeof(struct sdp_bsdh),
  					  gfp_page);
  		/* FIXME */
-@@ -684,7 +684,7 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
- 		skb = sdp_send_completion(ssk, wc->wr_id);
- 		if (unlikely(!skb))
- 			return;
--		sk_wmem_free_skb(&ssk->isk.sk, skb);
-+		sk_stream_free_skb(&ssk->isk.sk, skb);
- 		if (unlikely(wc->status)) {
- 			if (wc->status != IB_WC_WR_FLUSH_ERR) {
- 				sdp_dbg(&ssk->isk.sk,
-@@ -766,7 +766,7 @@ void sdp_work(struct work_struct *work)
- 		goto out;
+@@ -788,7 +788,7 @@ static int sdp_handle_send_comp(struct s
+ 	}
+ 
+ out:
+-	sk_wmem_free_skb(&ssk->isk.sk, skb);
++	sk_stream_free_skb(&ssk->isk.sk, skb);
+ 
+ 	return 0;
+ }
+@@ -874,7 +874,7 @@ void sdp_work(struct work_struct *work)
+ 
  	sdp_poll_cq(ssk, cq);
  	release_sock(sk);
 -	sk_mem_reclaim(sk);
@@ -141,11 +105,10 @@ index 36cbbad..ad788f7 100644
  	lock_sock(sk);
  	cq = ssk->cq;
  	if (unlikely(!cq))
-diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
-index b9f54d0..0cab38b 100644
---- a/drivers/infiniband/ulp/sdp/sdp_cma.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
-@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
+--- a/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_cma.c	2008-06-22 15:02:31.000000000 +0300
+@@ -162,8 +162,6 @@ int sdp_init_qp(struct sock *sk, struct 
  		goto err_cq;
  	}
  
@@ -154,11 +117,44 @@ index b9f54d0..0cab38b 100644
          qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
  
  	rc = rdma_create_qp(id, pd, &qp_init_attr);
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index d35c803..dcc60e3 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -490,7 +490,7 @@ static void sdp_close(struct sock *sk, long timeout)
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
+--- a/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp.h	2008-06-22 15:02:31.000000000 +0300
+@@ -266,30 +266,4 @@ void sdp_post_keepalive(struct sdp_sock 
+ void sdp_start_keepalive_timer(struct sock *sk);
+ void sdp_bzcopy_write_space(struct sdp_sock *ssk);
+ 
+-static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+-{
+-	struct sk_buff *skb;
+-
+-	/* The TCP header must be at least 32-bit aligned.  */
+-	size = ALIGN(size, 4);
+-
+-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+-	if (skb) {
+-		if (sk_wmem_schedule(sk, skb->truesize)) {
+-			/*
+-			 * Make sure that we have exactly size bytes
+-			 * available to the caller, no more, no less.
+-			 */
+-			skb_reserve(skb, skb_tailroom(skb) - size);
+-			return skb;
+-		}
+-		__kfree_skb(skb);
+-	} else {
+-		sk->sk_prot->enter_memory_pressure();
+-		sk_stream_moderate_sndbuf(sk);
+-	}
+-	return NULL;
+-}
+-
+-
+ #endif
+diff -Naup a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:25.000000000 +0300
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c	2008-06-22 15:02:31.000000000 +0300
+@@ -494,7 +494,7 @@ static void sdp_close(struct sock *sk, l
  		__kfree_skb(skb);
  	}
  
@@ -167,7 +163,7 @@ index d35c803..dcc60e3 100644
  
  	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
  	 * 3.10, we send a RST here because data was lost.  To
-@@ -1185,7 +1185,7 @@ static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags
+@@ -1197,7 +1197,7 @@ static inline void sdp_mark_urg(struct s
  {
  	if (unlikely(flags & MSG_OOB)) {
  		struct sk_buff *skb = sk->sk_write_queue.prev;
@@ -176,17 +172,17 @@ index d35c803..dcc60e3 100644
  	}
  }
  
-@@ -1202,8 +1202,7 @@ static inline void skb_entail(struct sock *sk, struct sdp_sock *ssk,
+@@ -1214,8 +1214,7 @@ static inline void skb_entail(struct soc
  {
          skb_header_release(skb);
          __skb_queue_tail(&sk->sk_write_queue, skb);
 -	sk->sk_wmem_queued += skb->truesize;
 -        sk_mem_charge(sk, skb->truesize);
-+        sk_charge_skb(sk, skb);
++	sk_charge_skb(sk, skb);
          if (!sk->sk_send_head)
                  sk->sk_send_head = skb;
          if (ssk->nonagle & TCP_NAGLE_PUSH)
-@@ -1367,7 +1366,7 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1379,7 +1378,7 @@ static inline int sdp_bcopy_get(struct s
  		if (copy > PAGE_SIZE - off)
  			copy = PAGE_SIZE - off;
  
@@ -195,7 +191,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		if (!page) {
-@@ -1439,7 +1438,7 @@ static inline int sdp_bzcopy_get(struct sock *sk, struct sk_buff *skb,
+@@ -1451,7 +1450,7 @@ static inline int sdp_bzcopy_get(struct 
  		if (left <= this_page)
  			this_page = left;
  
@@ -204,7 +200,7 @@ index d35c803..dcc60e3 100644
  			return SDP_DO_WAIT_MEM;
  
  		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-@@ -1647,8 +1646,8 @@ new_segment:
+@@ -1659,8 +1658,8 @@ new_segment:
  						goto wait_for_sndbuf;
  				}
  
@@ -215,7 +211,7 @@ index d35c803..dcc60e3 100644
  				if (!skb)
  					goto wait_for_memory;
  
-@@ -1672,7 +1671,7 @@ new_segment:
+@@ -1684,7 +1683,7 @@ new_segment:
  
  			/* OOB data byte should be the last byte of
  			   the data payload */
@@ -224,7 +220,7 @@ index d35c803..dcc60e3 100644
  			    !(flags & MSG_OOB)) {
  				sdp_mark_push(ssk, skb);
  				goto new_segment;
-@@ -1748,7 +1747,7 @@ do_fault:
+@@ -1760,7 +1759,7 @@ do_fault:
  		if (sk->sk_send_head == skb)
  			sk->sk_send_head = NULL;
  		__skb_unlink(skb, &sk->sk_write_queue);
@@ -233,7 +229,7 @@ index d35c803..dcc60e3 100644
  	}
  
  do_error:
-@@ -2353,6 +2352,10 @@ static int __init sdp_proc_init(void)
+@@ -2365,6 +2364,10 @@ static int __init sdp_proc_init(void)
  				 sdp_seq_afinfo.seq_fops);
  	if (p)
  		p->data = &sdp_seq_afinfo;
diff --git a/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch b/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch
index 789362b..a9d65a7 100644
--- a/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch
+++ b/kernel_patches/backport/2.6.9_U6/sdp_7277_to_2_6_11.patch
@@ -1,32 +1,32 @@
-Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
+Index: ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
 ===================================================================
---- openib_gen2-20060517-1739_check.orig/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 17:45:32.000000000 +0300
-+++ openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c	2006-05-17 18:33:13.000000000 +0300
-@@ -300,7 +300,6 @@
+--- ofed_kernel-2.6.9_U4.orig/drivers/infiniband/ulp/sdp/sdp_main.c
++++ ofed_kernel-2.6.9_U4/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -564,7 +564,6 @@ adjudge_to_death:
  		/* TODO: tcp_fin_time to get timeout */
  		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
  			atomic_read(&sk->sk_refcnt));
 -		atomic_inc(sk->sk_prot->orphan_count);
  		queue_delayed_work(sdp_workqueue, &sdp_sk(sk)->time_wait_work,
  				   TCP_FIN_TIMEOUT);
- 		goto out;
-@@ -495,7 +494,6 @@
- 	sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt));
- 
- 	cancel_delayed_work(&sdp_sk(sk)->time_wait_work);
--	atomic_dec(sk->sk_prot->orphan_count);
- 
- 	sock_put(sk);
+ 	}
+@@ -836,7 +835,6 @@ void sdp_cancel_fin_wait_timeout(struct 
+ {
+ 	ssk->fin_wait_timeout = 0;
+ 	cancel_delayed_work(&ssk->time_wait_work);
+-	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
  }
-@@ -517,7 +515,6 @@
- 	sdp_sk(sk)->time_wait = 0;
+ 
+ void sdp_destroy_work(struct work_struct *work)
+@@ -870,7 +868,6 @@ void sdp_time_wait_work(struct work_stru
+ 	sdp_sk(sk)->fin_wait_timeout = 0;
  	release_sock(sk);
  
 -	atomic_dec(sk->sk_prot->orphan_count);
- 	sock_put(sk);
- }
  
-@@ -1166,7 +1163,6 @@
+ 	if (sdp_sk(sk)->id)
+ 		rdma_disconnect(sdp_sk(sk)->id);
+@@ -2101,7 +2098,6 @@ void sdp_urg(struct sdp_sock *ssk, struc
  
  static atomic_t sockets_allocated;
  static atomic_t memory_allocated;
@@ -34,7 +34,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  static int memory_pressure;
  struct proto sdp_proto = {
          .close       = sdp_close,
-@@ -1187,13 +1183,11 @@
+@@ -2122,13 +2118,11 @@ struct proto sdp_proto = {
  	.sockets_allocated = &sockets_allocated,
  	.memory_allocated = &memory_allocated,
  	.memory_pressure = &memory_pressure,
@@ -50,7 +50,7 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
  	.name	     = "SDP",
  };
  
-@@ -1298,9 +1292,6 @@
+@@ -2490,9 +2484,6 @@ static void __exit sdp_exit(void)
  	sock_unregister(PF_INET_SDP);
  	proto_unregister(&sdp_proto);
  
@@ -59,4 +59,4 @@ Index: openib_gen2-20060517-1739_check/drivers/infiniband/ulp/sdp/sdp_main.c
 -		       atomic_read(&orphan_count));
  	destroy_workqueue(sdp_workqueue);
  	flush_scheduled_work();
- }
+ 
-- 
1.5.3