[ofa-general] [PATCH 1/1] SDP - Bug647 (size received from ChRcvBuf is never checked to see if it is in acceptable range)
Jim Mott
jim at mellanox.com
Mon Oct 22 13:52:24 PDT 2007
Clean up the buffer resize code to comply with CA4-83:
Upon receipt of a ChRcvBuf message, the remote peer shall not
change the buffer size in the direction opposite of that
requested.
Also add some comments and tidy up the code.
Signed-off-by: Jim Mott <jim at mellanox.com>
This patch was created by Ami Perlmuter on May 30, 2007 and attached to
bug 647 (and duplicate bug 640). I missed getting it into 1.2.5.
---
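For quick reference, the acceptance test that the new sdp_resize_buffers()
performs (see the sdp_bcopy.c hunk below) amounts to the following sketch.
This is illustration only, not part of the patch; chrcvbuf_size_ok() is a
made-up name, and SDP_HEAD_SIZE, SDP_MAX_SEND_SKB_FRAGS and PAGE_SIZE are
the driver's existing constants:

/* Sketch of the range check: a requested ChRcvBuf size is honored only
 * if it would grow the current receive buffer (per CA4-83 the size is
 * never moved in the opposite direction) and stays within the local
 * maximum the socket can post. */
static int chrcvbuf_size_ok(struct sdp_sock *ssk, u32 new_size)
{
	u32 curr_size = SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
	u32 max_size  = SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;

	return new_size > curr_size && new_size <= max_size;
}

Note that the size field is now handled as 32 bits on the wire (htonl/ntohl
instead of htons/ntohs), so the value is converted with ntohl() before the
check is applied.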
Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp.h
===================================================================
--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/sdp/sdp.h	2007-10-10 15:36:46.000000000 -0500
+++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp.h	2007-10-10 15:37:35.000000000 -0500
@@ -148,13 +148,16 @@ struct sdp_sock {
struct ib_send_wr tx_wr;
/* SDP slow start */
- int rcvbuf_scale;
- int sent_request;
- int sent_request_head;
- int recv_request_head;
- int recv_request;
- int recv_frags;
- int send_frags;
+ int rcvbuf_scale;        /* local recv buf scale for each socket */
+ int sent_request_head;   /* mark the tx_head of the last send resize
+                             request */
+ int sent_request;        /* 0 - not sent yet, 1 - request pending,
+                             -1 - resize done successfully */
+ int recv_request_head;   /* mark the rx_head when the resize request
+                             was received */
+ int recv_request;        /* flag if request to resize was received */
+ int recv_frags;          /* max skb frags in recv packets */
+ int send_frags;          /* max skb frags in send packets */
struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
struct ib_wc ibwc[SDP_NUM_WC];
@@ -227,9 +230,10 @@ struct sk_buff *sdp_recv_completion(stru
struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq);
void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb);
void sdp_add_sock(struct sdp_sock *ssk);
+void sdp_remove_sock(struct sdp_sock *ssk);
+void sdp_remove_large_sock(struct sdp_sock *ssk);
+int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size);
void sdp_post_keepalive(struct sdp_sock *ssk);
void sdp_start_keepalive_timer(struct sock *sk);
-void sdp_remove_sock(struct sdp_sock *ssk);
-void sdp_remove_large_sock(void);
#endif
Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_bcopy.c
===================================================================
--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2007-10-10 15:36:46.000000000 -0500
+++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2007-10-10 15:37:41.000000000 -0500
@@ -70,9 +70,13 @@ static int curr_large_sockets = 0;
atomic_t sdp_current_mem_usage;
spinlock_t sdp_large_sockets_lock;
-static int sdp_can_resize(void)
+static int sdp_get_large_socket(struct sdp_sock *ssk)
{
int count, ret;
+
+ if (ssk->recv_request)
+ return 1;
+
spin_lock_irq(&sdp_large_sockets_lock);
count = curr_large_sockets;
ret = curr_large_sockets < max_large_sockets;
@@ -83,11 +87,13 @@ static int sdp_can_resize(void)
return ret;
}
-void sdp_remove_large_sock(void)
+void sdp_remove_large_sock(struct sdp_sock *ssk)
{
- spin_lock_irq(&sdp_large_sockets_lock);
- curr_large_sockets--;
- spin_unlock_irq(&sdp_large_sockets_lock);
+ if (ssk->recv_frags) {
+ spin_lock_irq(&sdp_large_sockets_lock);
+ curr_large_sockets--;
+ spin_unlock_irq(&sdp_large_sockets_lock);
+ }
}
/* Like tcp_fin */
@@ -458,7 +464,7 @@ void sdp_post_sends(struct sdp_sock *ssk
/* FIXME */
BUG_ON(!skb);
resp_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *resp_size);
- resp_size->size = htons(ssk->recv_frags * PAGE_SIZE);
+ resp_size->size = htonl(ssk->recv_frags * PAGE_SIZE);
sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK);
}
@@ -485,7 +491,7 @@ void sdp_post_sends(struct sdp_sock *ssk
ssk->sent_request = SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
ssk->sent_request_head = ssk->tx_head;
req_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *req_size);
- req_size->size = htons(ssk->sent_request);
+ req_size->size = htonl(ssk->sent_request);
sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF);
}
@@ -521,11 +527,42 @@ void sdp_post_sends(struct sdp_sock *ssk
}
}
-static inline void sdp_resize(struct sdp_sock *ssk, u32 new_size)
+int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
+{
+ u32 curr_size = SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE;
+ u32 max_size = SDP_HEAD_SIZE + SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
+
+ if (new_size > curr_size && new_size <= max_size &&
+ sdp_get_large_socket(ssk)) {
+ ssk->rcvbuf_scale = rcvbuf_scale;
+ ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
+ if (ssk->recv_frags > SDP_MAX_SEND_SKB_FRAGS)
+ ssk->recv_frags = SDP_MAX_SEND_SKB_FRAGS;
+ return 0;
+ } else
+ return -1;
+}
+
+static void sdp_handle_resize_request(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
{
- ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
- if (ssk->recv_frags > SDP_MAX_SEND_SKB_FRAGS)
- ssk->recv_frags = SDP_MAX_SEND_SKB_FRAGS;
+ if (sdp_resize_buffers(ssk, ntohl(buf->size)) == 0)
+ ssk->recv_request_head = ssk->rx_head + 1;
+ else
+ ssk->recv_request_head = ssk->rx_tail;
+ ssk->recv_request = 1;
+}
+
+static void sdp_handle_resize_ack(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
+{
+ u32 new_size = ntohl(buf->size);
+
+ if (new_size > ssk->xmit_size_goal) {
+ ssk->sent_request = -1;
+ ssk->xmit_size_goal = new_size;
+ ssk->send_frags =
+ PAGE_ALIGN(ssk->xmit_size_goal) / PAGE_SIZE;
+ } else
+ ssk->sent_request = 0;
}
static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
@@ -605,28 +642,10 @@ static void sdp_handle_wc(struct sdp_soc
sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
sdp_fin(&ssk->isk.sk);
} else if (h->mid == SDP_MID_CHRCVBUF) {
- u32 new_size = *(u32 *)skb->data;
-
- if (ssk->recv_request || sdp_can_resize()) {
- ssk->rcvbuf_scale = rcvbuf_scale;
- sdp_resize(ssk, ntohs(new_size));
- ssk->recv_request_head = ssk->rx_head + 1;
- } else
- ssk->recv_request_head = ssk->rx_tail;
- ssk->recv_request = 1;
+ sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)skb->data);
__kfree_skb(skb);
} else if (h->mid == SDP_MID_CHRCVBUF_ACK) {
- u32 new_size = *(u32 *)skb->data;
- new_size = ntohs(new_size);
-
- if (new_size > ssk->xmit_size_goal) {
- ssk->sent_request = -1;
- ssk->xmit_size_goal = new_size;
- ssk->send_frags =
- PAGE_ALIGN(ssk->xmit_size_goal) /
- PAGE_SIZE;
- } else
- ssk->sent_request = 0;
+ sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)skb->data);
__kfree_skb(skb);
} else {
/* TODO: Handle other messages */
Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_cma.c
===================================================================
--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/sdp/sdp_cma.c	2007-10-10 15:36:46.000000000 -0500
+++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_cma.c	2007-10-10 15:36:59.000000000 -0500
@@ -241,6 +241,7 @@ int sdp_connect_handler(struct sock *sk,
sizeof(struct sdp_bsdh);
sdp_sk(child)->send_frags =
PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
PAGE_SIZE;
+ sdp_resize_buffers(sdp_sk(child), ntohl(h->desremrcvsz));
sdp_dbg(child, "%s bufs %d xmit_size_goal %d\n", __func__,
sdp_sk(child)->bufs,
Index: ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_main.c
===================================================================
--- ofa_1_3_dev_kernel.orig/drivers/infiniband/ulp/sdp/sdp_main.c	2007-10-10 15:36:46.000000000 -0500
+++ ofa_1_3_dev_kernel/drivers/infiniband/ulp/sdp/sdp_main.c	2007-10-10 15:37:35.000000000 -0500
@@ -204,8 +204,7 @@ static void sdp_destroy_qp(struct sdp_so
if (pd)
ib_dealloc_pd(pd);
- if (ssk->recv_frags)
- sdp_remove_large_sock();
+ sdp_remove_large_sock(ssk);
kfree(ssk->rx_ring);
kfree(ssk->tx_ring);