[openib-general] Re: [PATCH] Re: 0 op factor
Libor Michalek
libor at topspin.com
Tue Jun 14 14:24:39 PDT 2005
On Fri, May 13, 2005 at 05:06:55PM +0300, Michael S. Tsirkin wrote:
> Quoting r. Libor Michalek <libor at topspin.com>:
> > Subject: Re: [PATCH] Re: 0 op factor
> >
> > On Tue, May 10, 2005 at 05:47:00PM -0700, Libor Michalek wrote:
> > > On Sat, May 07, 2005 at 07:47:18PM +0200, Bernhard Fischer wrote:
> > >
> > > > - remove expect from _sdp_cm_path_complete().
> > >
> > > When I said that the SDP_EXPECT should eventually be removed, I meant
> > > that the functions which have their return values checked by SDP_EXPECT
> > > should either be turned into void return functions, or something
> > > intelligent should be done with the return value, such as error recovery
> > > or propagation. The functions which should be turned into void functions
> > > are the ones which will never return anything but success.
> >
> > To expand on the last point. There are a lot of functions, for
> > example those in sdp_buff.c, which check for incorrect function
> > usage, such as checking that a buffer is not already in a queue before
> > inserting it into a queue. These checks could be removed entirely, or
> > they could be kept and followed by a call to BUG() when the condition
> > is met. I prefer the latter, since the former would corrupt the entire
> > queue. I'm not sure which is preferable to everyone else; either way
> > the result is that the function becomes a void function.
>
> IMO BUG_ON is the way to go.
Attached is a patch which simplifies a number of functions to return
void instead of a status result, and to check for incorrect parameters
with BUG_ON. This results in the removal of a number of SDP_EXPECT
instances as well as other unused error handling code.
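To illustrate the pattern (a minimal sketch with simplified stand-in
names -- struct buff, struct pool and pool_put_tail are not the actual
sdp_buff.c identifiers): a queue-insert helper that used to report
misuse through its return value now traps the misuse with BUG_ON and
returns void, and its callers drop the
"result = ...; SDP_EXPECT(result >= 0);" boilerplate in favor of a
plain call.

#ifndef BUG_ON                  /* stand-in for the kernel's BUG_ON() */
#define BUG_ON(cond) do { if (cond) __builtin_trap(); } while (0)
#endif

struct buff {
        struct buff *next;
        struct buff *prev;
        struct pool *pool;      /* pool currently holding this buffer */
};

struct pool {
        struct buff *head;
        int size;
};

/*
 * Old style (what the patch removes): the helper returned an int,
 *
 *        if (buff->pool)
 *                return -EINVAL;
 *
 * and every caller carried a "result"/SDP_EXPECT pair it could not do
 * anything useful with.
 */

/* New style: incorrect usage is a caller bug, so trap it and return void. */
static void pool_put_tail(struct pool *p, struct buff *b)
{
        BUG_ON(b->pool);        /* buffer must not already be queued */

        if (!p->head) {
                b->next = b;
                b->prev = b;
                p->head = b;
        } else {
                /* insert at the tail, i.e. just before the head */
                b->next = p->head;
                b->prev = p->head->prev;
                b->next->prev = b;
                b->prev->next = b;
        }

        p->size++;
        b->pool = p;
}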
20 files changed, 221 insertions(+), 548 deletions(-)
-Libor
Index: infiniband/ulp/sdp/sdp_queue.h
===================================================================
--- infiniband/ulp/sdp/sdp_queue.h (revision 2588)
+++ infiniband/ulp/sdp/sdp_queue.h (working copy)
@@ -54,7 +54,7 @@
struct sdpc_desc *prev; /* previous structure in table */
u32 type; /* element type. (for generic queue) */
struct sdpc_desc_q *table; /* table to which this object belongs */
- int (*release)(struct sdpc_desc *element); /* release the object */
+ void (*release)(struct sdpc_desc *element); /* release the object */
};
/*
Index: infiniband/ulp/sdp/sdp_write.c
===================================================================
--- infiniband/ulp/sdp/sdp_write.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_write.c (working copy)
@@ -87,13 +87,11 @@
type = sdp_desc_q_type_head(&conn->w_snk);
switch (type) {
case SDP_DESC_TYPE_BUFF:
- buff = (struct sdpc_buff *) sdp_desc_q_get_head(&conn->w_snk);
- SDP_EXPECT((buff));
+ buff = (struct sdpc_buff *)sdp_desc_q_get_head(&conn->w_snk);
conn->send_qud -= buff->data_size;
- result = sdp_buff_pool_put(buff);
- SDP_EXPECT(result >= 0);
+ sdp_buff_pool_put(buff);
break;
case SDP_DESC_TYPE_IOCB:
@@ -104,19 +102,13 @@
}
iocb = (struct sdpc_iocb *)sdp_desc_q_get_head(&conn->w_snk);
- SDP_EXPECT((iocb));
iocb->flags &= ~(SDP_IOCB_F_ACTIVE | SDP_IOCB_F_RDMA_W);
SDP_CONN_STAT_WRITE_INC(conn, iocb->post);
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto error;
- }
+ sdp_iocb_complete(iocb, 0);
break;
case SDP_DESC_TYPE_NONE:
Index: infiniband/ulp/sdp/sdp_link.c
===================================================================
--- infiniband/ulp/sdp/sdp_link.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_link.c (working copy)
@@ -310,12 +310,13 @@
sdp_dbg_warn(NULL, "Error <%d> starting path record query",
result);
info->query = NULL;
- } else {
- info->qid = result;
- info->flags |= SDP_LINK_F_PATH;
+ return result;
}
- return result;
+ info->qid = result;
+ info->flags |= SDP_LINK_F_PATH;
+
+ return 0;
}
/*
Index: infiniband/ulp/sdp/sdp_rcvd.c
===================================================================
--- infiniband/ulp/sdp/sdp_rcvd.c (revision 2592)
+++ infiniband/ulp/sdp/sdp_rcvd.c (working copy)
@@ -125,7 +125,6 @@
static int sdp_rcvd_send_sm(struct sdp_opt *conn, struct sdpc_buff *buff)
{
struct sdpc_iocb *iocb;
- int result;
/*
* 1) Conn is not in source cancel mode. Send active IOCB
@@ -152,8 +151,7 @@
conn->src_sent--;
- result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(result >= 0);
+ sdp_iocb_complete(iocb, 0);
}
/*
* Cancel complete, clear the state.
@@ -169,7 +167,6 @@
{
struct msg_hdr_rwch *rwch;
struct sdpc_iocb *iocb;
- int result;
rwch = (struct msg_hdr_rwch *) buff->data;
buff->data = buff->data + sizeof(struct msg_hdr_rwch);
@@ -181,8 +178,7 @@
iocb = sdp_iocb_q_look(&conn->r_snk);
if (!iocb) {
sdp_dbg_warn(conn, "Cannot find IOCB for Write Completion.");
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_W));
@@ -196,17 +192,13 @@
if (rwch->size > iocb->len) {
sdp_dbg_warn(conn, "IOCB and Write size mismatch. <%d:%d>",
rwch->size, iocb->len);
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
/*
* Iocb is done, deregister memory, and generate completion.
*/
iocb = sdp_iocb_q_get_head(&conn->r_snk);
- SDP_EXPECT((iocb));
- conn->snk_sent--;
-
iocb->len -= rwch->size;
iocb->post += rwch->size;
@@ -214,23 +206,17 @@
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto error;
- }
+ conn->snk_sent--;
+ sdp_iocb_complete(iocb, 0);
+
return 0;
-error:
- return result;
}
static int sdp_rcvd_rdma_rd(struct sdp_opt *conn, struct sdpc_buff *buff)
{
struct msg_hdr_rrch *rrch;
struct sdpc_iocb *iocb;
- int result;
rrch = (struct msg_hdr_rrch *) buff->data;
buff->data = buff->data + sizeof(struct msg_hdr_rrch);
@@ -242,8 +228,7 @@
iocb = sdp_iocb_q_look(&conn->w_src);
if (!iocb) {
sdp_dbg_warn(conn, "Cannot find IOCB for Read Completion.");
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
SDP_CONN_STAT_SRC_INC(conn);
@@ -257,8 +242,7 @@
if (rrch->size > iocb->len) {
sdp_dbg_warn(conn, "IOCB and Read size mismatch. <%d:%d>",
rrch->size, iocb->len);
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
/*
* In combined mode the total RDMA size is going to be the buffer
@@ -279,19 +263,13 @@
*/
if (iocb->len <= 0) {
iocb = sdp_iocb_q_get_head(&conn->w_src);
- SDP_EXPECT((iocb));
conn->src_sent--;
SDP_CONN_STAT_WRITE_INC(conn, iocb->post);
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto error;
- }
+ sdp_iocb_complete(iocb, 0);
}
/*
* If Source Cancel was in process, and there are no more outstanding
@@ -304,8 +282,6 @@
}
return 0;
-error:
- return result;
}
static int sdp_rcvd_mode_change(struct sdp_opt *conn, struct sdpc_buff *buff)
@@ -457,8 +433,7 @@
conn->src_recv--;
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
+ sdp_advt_destroy(advt);
}
/*
* If there are active reads, mark the connection as being in
@@ -544,8 +519,7 @@
counter++;
conn->snk_recv--;
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
+ sdp_advt_destroy(advt);
}
/*
* A cancel ack is sent only if we cancelled an advertisment without
@@ -568,10 +542,10 @@
/*
* sdp_rcvd_snk_cancel_ack - sink cancel confirmantion
*/
-static int sdp_rcvd_snk_cancel_ack(struct sdp_opt *conn, struct sdpc_buff *buff)
+static int sdp_rcvd_snk_cancel_ack(struct sdp_opt *conn,
+ struct sdpc_buff *buff)
{
struct sdpc_iocb *iocb;
- int result;
sdp_dbg_ctrl(conn, "Sink Cancel Ack. actv <%d> mode <%d> flag <%08x>",
conn->snk_sent, conn->recv_mode, conn->flags);
@@ -579,28 +553,20 @@
if (!(conn->flags & SDP_CONN_F_SNK_CANCEL)) {
sdp_dbg_warn(conn, "Connection not in sink cancel mode <%08x>",
conn->flags);
- result = -EPROTO;
- goto done;
+ return -EPROTO;
}
/*
* drain and complete all active IOCBs
*/
while ((iocb = sdp_iocb_q_get_head(&conn->r_snk))) {
- conn->snk_sent--;
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto done;
- }
+ conn->snk_sent--;
+ sdp_iocb_complete(iocb, 0);
}
/*
* cancellation is complete. Cancel flag is cleared in RECV post.
*/
return 0;
-done:
- return result;
}
/*
@@ -675,22 +641,19 @@
if (conn->send_mode != SDP_MODE_PIPE) {
sdp_dbg_warn(conn, "SinkAvail, incorrect source mode <%d>",
conn->send_mode);
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
if (SDP_MSG_MAX_ADVS == (conn->src_recv + conn->snk_recv)) {
sdp_dbg_warn(conn, "SinkAvail, too many advertisments. <%d>",
(conn->src_recv + conn->snk_recv));
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
if (snkah->size < conn->send_size) {
sdp_dbg_warn(conn, "SinkAvail too small. <%d:%d>",
snkah->size, conn->send_size);
- result = -EPROTO;
- goto error;
+ return -EPROTO;
}
/*
* Save the advertisment, if it's not stale. otherwise update
@@ -716,15 +679,11 @@
* in cancel processing they need to be
* completed.
*/
- if (!(iocb->flags & SDP_IOCB_F_CANCEL)) {
- result = sdp_desc_q_put_head(&conn->send_queue,
- (struct sdpc_desc *)
- iocb);
- SDP_EXPECT(result >= 0);
- } else {
- result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(result >= 0);
- }
+ if (!(iocb->flags & SDP_IOCB_F_CANCEL))
+ sdp_desc_q_put_head(&conn->send_queue,
+ (struct sdpc_desc *)iocb);
+ else
+ sdp_iocb_complete(iocb, 0);
}
/*
* If Source Cancel was in process, it should now
@@ -741,8 +700,7 @@
advt = sdp_advt_create();
if (!advt) {
sdp_dbg_warn(conn, "SrcAvail cannot be copied.");
- result = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
advt->post = 0;
@@ -783,7 +741,6 @@
* PostRecv will take care of consuming this advertisment, based
* on result.
*/
-error:
return result;
}
@@ -948,7 +905,7 @@
return result;
advt_error:
- (void)sdp_advt_destroy(advt);
+ sdp_advt_destroy(advt);
done:
return result;
}
@@ -1187,8 +1144,7 @@
* process result.
*/
if (!result) {
- result = sdp_buff_pool_put(buff);
- SDP_EXPECT(result >= 0);
+ sdp_buff_pool_put(buff);
/*
* If this buffer was consumed, then make sure sufficient
* recv buffers are posted. Also we might be able to move
@@ -1226,7 +1182,7 @@
return 0;
drop:
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
done:
return result;
}
Index: infiniband/ulp/sdp/sdp_inet.c
===================================================================
--- infiniband/ulp/sdp/sdp_inet.c (revision 2593)
+++ infiniband/ulp/sdp/sdp_inet.c (working copy)
@@ -401,8 +401,7 @@
sdp_iocb_q_cancel_all_write(conn, -ECANCELED);
- result = sdp_inet_disconnect(conn);
- SDP_EXPECT(result >= 0);
+ (void)sdp_inet_disconnect(conn);
}
#endif
}
@@ -1137,10 +1136,8 @@
case TCP_NODELAY:
conn->nodelay = value ? 1 : 0;
- if (conn->nodelay > 0) {
- result = sdp_send_flush(conn);
- SDP_EXPECT(result >= 0);
- }
+ if (conn->nodelay > 0)
+ (void)sdp_send_flush(conn);
break;
case SDP_ZCOPY_THRSH:
Index: infiniband/ulp/sdp/sdp_proto.h
===================================================================
--- infiniband/ulp/sdp/sdp_proto.h (revision 2593)
+++ infiniband/ulp/sdp/sdp_proto.h (working copy)
@@ -49,9 +49,9 @@
*/
struct sdpc_buff *sdp_buff_pool_get(void);
-int sdp_buff_pool_put(struct sdpc_buff *buff);
+void sdp_buff_pool_put(struct sdpc_buff *buff);
-int sdp_buff_pool_chain_put(struct sdpc_buff *buff, u32 count);
+void sdp_buff_pool_chain_put(struct sdpc_buff *buff, u32 count);
void sdp_buff_pool_chain_link(struct sdpc_buff *head, struct sdpc_buff *buff);
@@ -68,8 +68,6 @@
sdp_buff_q_clear_unmap(pool, NULL, 0);
}
-int sdp_buff_q_remove(struct sdpc_buff *buff);
-
struct sdpc_buff *sdp_buff_q_get(struct sdpc_buff_q *pool);
struct sdpc_buff *sdp_buff_q_get_head(struct sdpc_buff_q *pool);
@@ -78,17 +76,12 @@
struct sdpc_buff *sdp_buff_q_look_head(struct sdpc_buff_q *pool);
-int sdp_buff_q_put(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
+void sdp_buff_q_put(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
-int sdp_buff_q_put_head(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
+void sdp_buff_q_put_head(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
-int sdp_buff_q_put_tail(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
+void sdp_buff_q_put_tail(struct sdpc_buff_q *pool, struct sdpc_buff *buff);
-struct sdpc_buff *sdp_buff_q_fetch_head(struct sdpc_buff_q *pool,
- int (*test)(struct sdpc_buff *buff,
- void *arg),
- void *usr_arg);
-
int sdp_buff_q_trav_head(struct sdpc_buff_q *pool,
int (*trav_func)(struct sdpc_buff *buff,
void *arg),
@@ -133,7 +126,7 @@
struct sdpc_advt *sdp_advt_create(void);
-int sdp_advt_destroy(struct sdpc_advt *advt);
+void sdp_advt_destroy(struct sdpc_advt *advt);
struct sdpc_advt *sdp_advt_q_get(struct sdpc_advt_q *table);
@@ -154,7 +147,7 @@
struct sdpc_iocb *sdp_iocb_create(void);
-int sdp_iocb_destroy(struct sdpc_iocb *iocb);
+void sdp_iocb_destroy(struct sdpc_iocb *iocb);
struct sdpc_iocb *sdp_iocb_q_look(struct sdpc_iocb_q *table);
@@ -162,9 +155,9 @@
struct sdpc_iocb *sdp_iocb_q_get_tail(struct sdpc_iocb_q *table);
-int sdp_iocb_q_put_head(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb);
+void sdp_iocb_q_put_head(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb);
-int sdp_iocb_q_put_tail(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb);
+void sdp_iocb_q_put_tail(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb);
struct sdpc_iocb *sdp_iocb_q_lookup(struct sdpc_iocb_q *table, u32 key);
@@ -176,7 +169,7 @@
void sdp_iocb_release(struct sdpc_iocb *iocb);
-int sdp_iocb_complete(struct sdpc_iocb *iocb, ssize_t status);
+void sdp_iocb_complete(struct sdpc_iocb *iocb, ssize_t status);
int sdp_iocb_lock(struct sdpc_iocb *iocb);
@@ -191,11 +184,11 @@
struct sdpc_desc *sdp_desc_q_get_tail(struct sdpc_desc_q *table);
-int sdp_desc_q_put_head(struct sdpc_desc_q *table,
- struct sdpc_desc *element);
+void sdp_desc_q_put_head(struct sdpc_desc_q *table,
+ struct sdpc_desc *element);
-int sdp_desc_q_put_tail(struct sdpc_desc_q *table,
- struct sdpc_desc *element);
+void sdp_desc_q_put_tail(struct sdpc_desc_q *table,
+ struct sdpc_desc *element);
struct sdpc_desc *sdp_desc_q_look_head(struct sdpc_desc_q *table);
@@ -573,7 +566,7 @@
/*
* sdp_conn_stat_dump - dump stats to the log
*/
-static inline int sdp_conn_stat_dump(struct sdp_opt *conn)
+static inline void sdp_conn_stat_dump(struct sdp_opt *conn)
{
#ifdef _SDP_CONN_STATS_REC
int counter;
@@ -590,7 +583,6 @@
conn->recv_mid[counter]);
}
#endif
- return 0;
}
/*
Index: infiniband/ulp/sdp/sdp_read.c
===================================================================
--- infiniband/ulp/sdp/sdp_read.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_read.c (working copy)
@@ -62,15 +62,13 @@
}
advt = sdp_advt_q_get(&conn->src_actv);
- SDP_EXPECT((advt));
conn->src_recv--;
result = sdp_send_ctrl_rdma_rd(conn, advt->post);
SDP_EXPECT(result >= 0);
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
+ sdp_advt_destroy(advt);
/*
* If a SrcAvailCancel was received, and all RDMA reads
* have been flushed, perform tail processing
@@ -161,15 +159,14 @@
type = sdp_desc_q_type_head(&conn->r_src);
switch (type) {
case SDP_DESC_TYPE_BUFF:
- buff = (struct sdpc_buff *) sdp_desc_q_get_head(&conn->r_src);
- SDP_EXPECT((buff));
+ buff = (struct sdpc_buff *)sdp_desc_q_get_head(&conn->r_src);
if (comp->wr_id != buff->wrid) {
sdp_dbg_warn(conn, "work request mismatch <%llu:%llu>",
(unsigned long long)comp->wr_id,
(unsigned long long)buff->wrid);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
result = -EPROTO;
goto done;
}
@@ -191,8 +188,7 @@
sdp_dbg_warn(conn, "Error <%d> receiving buff",
result);
- result = sdp_buff_pool_put(buff);
- SDP_EXPECT(result >= 0);
+ sdp_buff_pool_put(buff);
}
break;
@@ -202,19 +198,13 @@
break;
iocb = (struct sdpc_iocb *)sdp_desc_q_get_head(&conn->r_src);
- SDP_EXPECT((iocb));
iocb->flags &= ~(SDP_IOCB_F_ACTIVE | SDP_IOCB_F_RDMA_R);
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto done;
- }
+ sdp_iocb_complete(iocb, 0);
break;
case SDP_DESC_TYPE_NONE:
@@ -232,18 +222,10 @@
if (conn->sk->sk_rcvlowat > iocb->post)
break;
- iocb = sdp_iocb_q_get_head(&conn->r_pend);
- SDP_EXPECT((iocb));
-
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
- goto done;
- }
+ sdp_iocb_complete(sdp_iocb_q_get_head(&conn->r_pend), 0);
break;
default:
Index: infiniband/ulp/sdp/sdp_send.c
===================================================================
--- infiniband/ulp/sdp/sdp_send.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_send.c (working copy)
@@ -81,7 +81,8 @@
* the flag. This allows for at least one pending urgent message
* to send early notification.
*/
- if ((conn->flags & SDP_CONN_F_OOB_SEND) && conn->oob_offset <= 0xFFFF) {
+ if ((conn->flags & SDP_CONN_F_OOB_SEND) &&
+ conn->oob_offset <= 0xFFFF) {
SDP_BSDH_SET_OOB_PEND(buff->bsdh_hdr);
SDP_BUFF_F_SET_SE(buff);
@@ -123,16 +124,12 @@
/*
* check queue membership. (first send attempt vs. flush)
*/
- if (sdp_desc_q_member((struct sdpc_desc *) buff) > 0)
+ if (sdp_desc_q_member((struct sdpc_desc *) buff))
sdp_desc_q_remove((struct sdpc_desc *) buff);
/*
* save the buffer for the event handler.
*/
- result = sdp_buff_q_put_tail(&conn->send_post, buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queueing send buffer", result);
- goto done;
- }
+ sdp_buff_q_put_tail(&conn->send_post, buff);
/*
* post send
*/
@@ -182,9 +179,9 @@
/*
* sdp_send_data_buff_post - Post data for buffered transmission
*/
-static int sdp_send_data_buff_post(struct sdp_opt *conn, struct sdpc_buff *buff)
+static int sdp_send_data_buff_post(struct sdp_opt *conn,
+ struct sdpc_buff *buff)
{
- struct sdpc_advt *advt;
int result;
/*
@@ -253,7 +250,7 @@
* update non-discard counter.
* Make consideration for a pending sink. (can be forced by OOB)
*/
- if (sdp_advt_q_size(&conn->snk_pend) > 0) {
+ if (sdp_advt_q_size(&conn->snk_pend)) {
/*
* As sink advertisment needs to be discarded. We always
* complete an advertisment if there is not enough room
@@ -263,11 +260,7 @@
* (remember the spec makes sure that the sink is bigger
* then the buffer.)
*/
- advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((advt));
-
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
+ sdp_advt_destroy(sdp_advt_q_get(&conn->snk_pend));
/*
* update sink advertisments.
*/
@@ -281,12 +274,10 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting send data buffer",
result);
- goto error;
+ return result;
}
return 0;
-error:
- return result;
}
/*
@@ -298,7 +289,6 @@
struct ib_send_wr *bad_wr;
struct sdpc_advt *advt;
int result;
- int zcopy;
/*
* check state to determine OK to send:
@@ -367,12 +357,7 @@
if (sdp_desc_q_member((struct sdpc_desc *) buff) > 0)
sdp_desc_q_remove((struct sdpc_desc *) buff);
- result = sdp_desc_q_put_tail(&conn->w_snk, (struct sdpc_desc *)buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queueing write buffer. <%d>",
- result, sdp_desc_q_size(&conn->w_snk));
- goto error;
- }
+ sdp_desc_q_put_tail(&conn->w_snk, (struct sdpc_desc *)buff);
/*
* update send queue depth
*/
@@ -402,24 +387,18 @@
* advertisment.
*/
if (conn->send_size > advt->size) {
- advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((advt));
- zcopy = advt->post;
-
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
-
- result = sdp_send_ctrl_rdma_wr(conn, zcopy);
+ result = sdp_send_ctrl_rdma_wr(conn, advt->post);
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing sink. <%d>",
- result, zcopy);
+ result, advt->post);
result = -ENODEV;
goto error;
}
/*
* update sink advertisments.
*/
+ sdp_advt_destroy(sdp_advt_q_get(&conn->snk_pend));
conn->snk_recv--;
}
@@ -523,19 +502,7 @@
if (conn->send_size <= advt->size)
continue;
- advt = sdp_advt_q_get(&conn->snk_pend);
- if (!advt) {
- sdp_dbg_warn(conn, "sink advertisment disappeared.");
- result = -ENODEV;
- goto error;
- }
-
- zcopy = advt->post;
-
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
-
- result = sdp_send_ctrl_rdma_wr(conn, zcopy);
+ result = sdp_send_ctrl_rdma_wr(conn, advt->post);
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing sink. <%d>",
result, zcopy);
@@ -545,6 +512,7 @@
/*
* update sink advertisments.
*/
+ sdp_advt_destroy(sdp_advt_q_get(&conn->snk_pend));
conn->snk_recv--;
}
@@ -715,7 +683,7 @@
error:
iocb->flags &= ~(SDP_IOCB_F_RDMA_R | SDP_IOCB_F_ACTIVE);
drop:
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
return result;
}
@@ -832,7 +800,7 @@
return iocb->len;
drop:
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
error:
return result;
}
@@ -865,13 +833,8 @@
if (sdp_desc_q_member((struct sdpc_desc *)iocb) > 0)
sdp_desc_q_remove((struct sdpc_desc *)iocb);
- result = sdp_desc_q_put_tail(&conn->w_snk,
- (struct sdpc_desc *)iocb);
- if (result < 0) {
- sdp_dbg_warn(conn,
- "Error <%d> queuing write IOCB.",
- result);
- }
+ sdp_desc_q_put_tail(&conn->w_snk,
+ (struct sdpc_desc *)iocb);
}
goto done;
@@ -899,11 +862,7 @@
SDP_CONN_STAT_WRITE_INC(conn, iocb->post);
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0)
- sdp_dbg_warn(conn,
- "Error <%d> completing iocb <%d>",
- result, iocb->key);
+ sdp_iocb_complete(iocb, 0);
}
goto done;
@@ -917,11 +876,7 @@
if (sdp_desc_q_member((struct sdpc_desc *) iocb) > 0)
sdp_desc_q_remove((struct sdpc_desc *)iocb);
- result = sdp_iocb_q_put_tail(&conn->w_src, iocb);
- if (result < 0)
- sdp_dbg_warn(conn, "Error <%d> queueing write <%d:%d>",
- result, iocb->key,
- sdp_iocb_q_size(&conn->w_src));
+ sdp_iocb_q_put_tail(&conn->w_src, iocb);
}
done:
@@ -972,10 +927,9 @@
* non-zero result is generated.
* (positive: no space; negative: error)
*/
- while (!result && sdp_desc_q_size(&conn->send_queue) > 0) {
- element = sdp_desc_q_look_head(&conn->send_queue);
- SDP_EXPECT((element));
-
+ while (!result &&
+ (element = sdp_desc_q_look_head(&conn->send_queue))) {
+
result = sdp_send_data_queue_test(conn, element);
if (!result)
continue;
@@ -991,11 +945,9 @@
* since called functions can dequeue the
* element, and not know how to requeue it.
*/
- if (!sdp_desc_q_member(element)) {
- result = sdp_desc_q_put_head(&conn->send_queue,
- element);
- SDP_EXPECT(result >= 0);
- }
+ if (!sdp_desc_q_member(element))
+ sdp_desc_q_put_head(&conn->send_queue,
+ element);
}
}
@@ -1018,12 +970,8 @@
*/
if (sdp_desc_q_size(&conn->send_queue) > 0 ||
(result = sdp_send_data_queue_test(conn, element)) > 0) {
- result = sdp_desc_q_put_tail(&conn->send_queue, element);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queueing data for send",
- result);
- goto done;
- }
+
+ sdp_desc_q_put_tail(&conn->send_queue, element);
/*
* Potentially request a switch to pipelined mode.
*/
@@ -1084,7 +1032,6 @@
int urg)
{
int result = 0;
- int expect;
/*
* See note on send OOB implementation in SendBuffPost.
@@ -1121,8 +1068,7 @@
sdp_dbg_warn(conn, "Error <%d> buffer to SEND queue.",
result);
- expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(expect >= 0);
+ sdp_buff_pool_put(buff);
}
}
@@ -1171,9 +1117,8 @@
* As long as there are buffers, try to post until a non-zero
* result is generated. (positive: no space; negative: error)
*/
- while (!result && sdp_desc_q_size(&conn->send_ctrl) > 0) {
- element = sdp_desc_q_look_head(&conn->send_ctrl);
- SDP_EXPECT((element));
+ while (!result &&
+ (element = sdp_desc_q_look_head(&conn->send_ctrl))) {
result = sdp_send_ctrl_buff_test(conn,
(struct sdpc_buff *)element);
@@ -1185,11 +1130,8 @@
"Error <%d> failed to flush control msg",
result);
- if (!sdp_desc_q_member(element)) {
- result = sdp_desc_q_put_head(&conn->send_ctrl,
- element);
- SDP_EXPECT(result >= 0);
- }
+ if (!sdp_desc_q_member(element))
+ sdp_desc_q_put_head(&conn->send_ctrl, element);
}
}
@@ -1208,18 +1150,12 @@
* Either post a send, or buffer the packet in the tx queue
*/
if (sdp_desc_q_size(&conn->send_ctrl) > 0 ||
- (result = sdp_send_ctrl_buff_test(conn, buff)) > 0) {
+ (result = sdp_send_ctrl_buff_test(conn, buff)) > 0)
/*
* save the buffer for later flushing into the post queue.
*/
- result = sdp_desc_q_put_tail(&conn->send_ctrl,
- (struct sdpc_desc *)buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queueing control buff",
- result);
- goto error;
- }
- }
+ sdp_desc_q_put_tail(&conn->send_ctrl,
+ (struct sdpc_desc *)buff);
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> during control send posting",
@@ -1278,7 +1214,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
}
return result;
@@ -1319,7 +1255,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
goto error;
}
@@ -1462,7 +1398,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
}
error:
@@ -1531,7 +1467,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
}
error:
@@ -1588,7 +1524,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
}
error:
@@ -1736,7 +1672,7 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
}
error:
@@ -1804,16 +1740,13 @@
sdp_desc_q_remove((struct sdpc_desc *)iocb);
if (iocb->flags & SDP_IOCB_F_ACTIVE) {
- if (iocb->flags & SDP_IOCB_F_RDMA_W) {
- result = sdp_desc_q_put_tail(&conn->w_snk,
- (struct sdpc_desc *)iocb);
- SDP_EXPECT(result >= 0);
- } else {
+ if (iocb->flags & SDP_IOCB_F_RDMA_W)
+ sdp_desc_q_put_tail(&conn->w_snk,
+ (struct sdpc_desc *)iocb);
+ else {
SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_R));
- result = sdp_iocb_q_put_tail(&conn->w_src,
- iocb);
- SDP_EXPECT(result >= 0);
+ sdp_iocb_q_put_tail(&conn->w_src, iocb);
}
} else {
/*
@@ -1821,13 +1754,10 @@
* needs to be compelted.
*/
if (iocb->post > 0) {
- result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(result >= 0);
-
+ sdp_iocb_complete(iocb, 0);
result = -EAGAIN;
} else {
- result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(result >= 0);
+ sdp_iocb_destroy(iocb);
/*
* completion reference
*/
@@ -1924,14 +1854,11 @@
*/
advt = sdp_advt_q_look(&conn->snk_pend);
if (advt && advt->post > 0) {
- advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((advt));
result = sdp_send_ctrl_rdma_wr(conn, advt->post);
SDP_EXPECT(result >= 0);
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
+ sdp_advt_destroy(sdp_advt_q_get(&conn->snk_pend));
/*
* update sink advertisments.
*/
@@ -2080,7 +2007,7 @@
msg->msg_iov,
copy);
if (result < 0) {
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
goto done;
}
#endif
@@ -2196,7 +2123,7 @@
sdp_dbg_warn(conn, "Error <%d> locking IOCB <%Zu:%d>",
result, size, copied);
- (void)sdp_iocb_destroy(iocb);
+ sdp_iocb_destroy(iocb);
break;
}
@@ -2209,7 +2136,7 @@
sdp_dbg_warn(conn, "Error <%d> queueing write IOCB",
result);
- (void)sdp_iocb_destroy(iocb);
+ sdp_iocb_destroy(iocb);
break;
}
Index: infiniband/ulp/sdp/sdp_actv.c
===================================================================
--- infiniband/ulp/sdp/sdp_actv.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_actv.c (working copy)
@@ -309,7 +309,7 @@
/*
* Pop the hello message that was sent
*/
- (void)sdp_buff_pool_put(sdp_buff_q_get_head(&conn->send_post));
+ sdp_buff_pool_put(sdp_buff_q_get_head(&conn->send_post));
result = sdp_cm_actv_establish(conn);
if (result) {
@@ -343,7 +343,6 @@
struct sdp_opt *conn = (struct sdp_opt *) arg;
struct sdpc_buff *buff;
int result = 0;
- int expect;
/*
* lock the socket
*/
@@ -441,16 +440,7 @@
/*
* save message
*/
- result = sdp_buff_q_put(&conn->send_post, buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> buffering hello msg.", result);
-
- expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(expect >= 0);
-
- status = -EPROTO;
- goto failed;
- }
+ sdp_buff_q_put_tail(&conn->send_post, buff);
#if 1
/*
* Mellanox performance bug workaround.
Index: infiniband/ulp/sdp/sdp_conn.c
===================================================================
--- infiniband/ulp/sdp/sdp_conn.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_conn.c (working copy)
@@ -758,8 +758,7 @@
result, dev_root_s.sk_entry,
dev_root_s.sk_size);
- result = sdp_conn_stat_dump(conn);
- SDP_EXPECT(result >= 0);
+ sdp_conn_stat_dump(conn);
/*
* really there shouldn't be anything in these tables, but it's
* really bad if we leave a dangling reference here.
Index: infiniband/ulp/sdp/sdp_advt.c
===================================================================
--- infiniband/ulp/sdp/sdp_advt.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_advt.c (working copy)
@@ -53,12 +53,7 @@
advt = kmem_cache_alloc(sdp_advt_cache, SLAB_ATOMIC);
if (advt) {
- advt->next = NULL;
- advt->prev = NULL;
- advt->size = 0;
- advt->post = 0;
- advt->addr = 0;
- advt->rkey = 0;
+ memset(advt, 0, sizeof(*advt));
advt->type = SDP_DESC_TYPE_ADVT;
advt->release = sdp_advt_destroy;
@@ -70,16 +65,13 @@
/*
* sdp_advt_destroy - destroy an advertisment object
*/
-int sdp_advt_destroy(struct sdpc_advt *advt)
+void sdp_advt_destroy(struct sdpc_advt *advt)
{
- if (advt->next || advt->prev)
- return -EACCES;
+ BUG_ON(advt->next || advt->prev);
/*
* return the object to its cache
*/
kmem_cache_free(sdp_advt_cache, advt);
-
- return 0;
}
/*
@@ -130,6 +122,8 @@
struct sdpc_advt *next;
struct sdpc_advt *prev;
+ BUG_ON(advt->table);
+
if (!table->head) {
advt->next = advt;
advt->prev = advt;
@@ -163,15 +157,11 @@
void sdp_advt_q_clear(struct sdpc_advt_q *table)
{
struct sdpc_advt *advt;
- int result;
-
/*
* drain the table of any objects
*/
- while ((advt = sdp_advt_q_get(table))) {
- result = sdp_advt_destroy(advt);
- SDP_EXPECT(result >= 0);
- }
+ while ((advt = sdp_advt_q_get(table)))
+ sdp_advt_destroy(advt);
}
/*
Index: infiniband/ulp/sdp/sdp_recv.c
===================================================================
--- infiniband/ulp/sdp/sdp_recv.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_recv.c (working copy)
@@ -74,11 +74,7 @@
* actually posting the thing. Completion event can happen before
* post function returns.
*/
- result = sdp_buff_q_put_tail(&conn->recv_post, buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queuing recv buffer.", result);
- goto drop;
- }
+ sdp_buff_q_put_tail(&conn->recv_post, buff);
sdp_dbg_data(conn, "POST RECV BUFF wrid <%llu> of <%u> bytes.",
(unsigned long long) buff->wrid,
@@ -106,7 +102,7 @@
return 0;
drop:
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
conn->l_recv_bf--;
error:
return result;
@@ -180,11 +176,7 @@
* actually posting the thing. Completion event can happen before
* post function returns.
*/
- result = sdp_desc_q_put_tail(&conn->r_src, (struct sdpc_desc *) buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queuing rdma read.", result);
- goto drop;
- }
+ sdp_desc_q_put_tail(&conn->r_src, (struct sdpc_desc *) buff);
sdp_dbg_data(conn, "POST READ BUFF wrid <%llu> of <%u> bytes.",
(unsigned long long) buff->wrid,
@@ -216,7 +208,7 @@
return 0;
drop:
- (void)sdp_buff_pool_put(buff);
+ sdp_buff_pool_put(buff);
error:
done:
return result;
@@ -306,25 +298,11 @@
/*
* if there is no more iocb space queue the it for completion
*/
- if (!iocb->len) {
- iocb = sdp_iocb_q_get_head(&conn->r_pend);
- if (!iocb) {
- sdp_dbg_warn(conn, "read IOCB disappeared. <%d>",
- sdp_iocb_q_size(&conn->r_pend));
- result = -ENODEV;
- goto error;
- }
+ if (!iocb->len)
+ sdp_desc_q_put_tail(&conn->r_src,
+ (struct sdpc_desc *)
+ sdp_iocb_q_get_head(&conn->r_pend));
- result = sdp_desc_q_put_tail(&conn->r_src,
- (struct sdpc_desc *)iocb);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queuing read IOCB",
- result);
- (void)sdp_iocb_destroy(iocb);
- goto error;
- }
- }
-
sdp_dbg_data(conn, "POST READ IOCB wrid <%llu> bytes <%u:%d:%d>.",
(unsigned long long) iocb->wrid, zcopy,
iocb->len, advt->size);
@@ -410,19 +388,7 @@
/*
* queue IOCB
*/
- iocb = sdp_iocb_q_get_head(&conn->r_pend);
- if (result < 0) {
- sdp_dbg_warn(conn, "read IOCB missing from pending table <%d>",
- sdp_iocb_q_size(&conn->r_pend));
- goto release;
- }
-
- result = sdp_iocb_q_put_tail(&conn->r_snk, iocb);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> queueing active write IOCB",
- result);
- goto re_q;
- }
+ sdp_iocb_q_put_tail(&conn->r_snk, sdp_iocb_q_get_head(&conn->r_pend));
/*
* Either post a send, or buffer the packet in the tx queue
*/
@@ -433,21 +399,19 @@
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> sending SnkAvail message",
result);
- goto de_q;
+
+ sdp_iocb_q_put_head(&conn->r_pend,
+ sdp_iocb_q_get_tail(&conn->r_snk));
+
+ iocb->flags &= ~SDP_IOCB_F_ACTIVE;
+ iocb->flags &= ~SDP_IOCB_F_RDMA_W;
+
+ goto error;
}
conn->snk_sent++;
return 0;
-de_q:
- iocb = sdp_iocb_q_get_tail(&conn->r_snk);
-re_q:
- (void)sdp_iocb_q_put_head(&conn->r_pend, iocb);
-release:
- iocb->flags &= ~SDP_IOCB_F_ACTIVE;
- iocb->flags &= ~SDP_IOCB_F_RDMA_W;
-
- sdp_iocb_release(iocb);
error:
return result;
}
@@ -727,7 +691,7 @@
result, iocb->len,
(unsigned)(buff->tail - buff->data));
- (void)sdp_iocb_q_put_head(&conn->r_snk, iocb);
+ sdp_iocb_q_put_head(&conn->r_snk, iocb);
return result;
}
@@ -739,10 +703,7 @@
/*
* callback to complete IOCB
*/
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0)
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
+ sdp_iocb_complete(iocb, 0);
return (buff->tail - buff->data);
}
@@ -790,18 +751,12 @@
/*
* complete IOCB
*/
- iocb = sdp_iocb_q_get_head(&conn->r_pend);
- SDP_EXPECT((iocb));
-
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
/*
* callback to complete IOCB
*/
- result = sdp_iocb_complete(iocb, 0);
- if (result < 0)
- sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
- result, iocb->key);
+ sdp_iocb_complete(sdp_iocb_q_get_head(&conn->r_pend), 0);
}
return (buff->tail - buff->data);
@@ -888,10 +843,8 @@
*/
buffered = buff->tail - buff->data;
- if (buffered > 0) {
- result = sdp_buff_q_put_tail(&conn->recv_pool, buff);
- SDP_EXPECT(result >= 0);
- }
+ if (buffered)
+ sdp_buff_q_put_tail(&conn->recv_pool, buff);
return buffered;
done:
@@ -962,14 +915,11 @@
/*
* callback to complete IOCB, or drop reference
*/
- result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(result >= 0);
-
+ sdp_iocb_complete(iocb, 0);
result = -EAGAIN;
}
else {
- result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(result >= 0);
+ sdp_iocb_destroy(iocb);
/*
* completion reference
*/
@@ -981,16 +931,13 @@
goto unlock;
}
- if (iocb->flags & SDP_IOCB_F_RDMA_W) {
- result = sdp_iocb_q_put_tail(&conn->r_snk, iocb);
- SDP_EXPECT(result >= 0);
- }
+ if (iocb->flags & SDP_IOCB_F_RDMA_W)
+ sdp_iocb_q_put_tail(&conn->r_snk, iocb);
else {
SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_R));
- result = sdp_desc_q_put_tail(&conn->r_src,
- (struct sdpc_desc *)iocb);
- SDP_EXPECT(result >= 0);
+ sdp_desc_q_put_tail(&conn->r_src,
+ (struct sdpc_desc *)iocb);
}
}
/*
@@ -1135,8 +1082,7 @@
sdp_inet_recv_urg_test,
(void *)0);
if (buff) {
- result = sdp_buff_pool_put(buff);
- SDP_EXPECT(result >= 0);
+ sdp_buff_pool_put(buff);
result = sdp_recv_flush(conn);
SDP_EXPECT(result >= 0);
@@ -1279,12 +1225,8 @@
buff->data,
copy);
if (result < 0) {
- expect =
- sdp_buff_q_put_head(&conn->
- recv_pool,
- buff);
- SDP_EXPECT(expect >= 0);
-
+ sdp_buff_q_put_head(&conn->recv_pool,
+ buff);
goto done;
}
#endif
@@ -1298,9 +1240,7 @@
copied += copy;
if ((buff->tail - buff->data) > 0) {
- expect = sdp_buff_q_put_head(&conn->recv_pool,
- buff);
- SDP_EXPECT(expect >= 0);
+ sdp_buff_q_put_head(&conn->recv_pool, buff);
/*
* always break, PEEK and OOB together could
* throw us into a loop without a forced
@@ -1312,11 +1252,9 @@
break;
}
- if (flags & MSG_PEEK) {
- expect = sdp_buff_q_put_head(&peek_queue,
- buff);
- SDP_EXPECT(expect >= 0);
- } else {
+ if (flags & MSG_PEEK)
+ sdp_buff_q_put_head(&peek_queue, buff);
+ else {
if (buff->flags & SDP_BUFF_F_OOB_PRES)
conn->rcv_urg_cnt -= 1;
/*
@@ -1459,21 +1397,13 @@
"Error <%d> IOCB lock <%Zu:%d>",
result, size, copied);
- (void)sdp_iocb_destroy(iocb);
+ sdp_iocb_destroy(iocb);
break;
}
SDP_CONN_STAT_RQ_INC(conn, iocb->size);
- result = sdp_iocb_q_put_tail(&conn->r_pend, iocb);
- if (result < 0) {
- sdp_dbg_warn(conn,
- "Error <%d> IOCB queue <%Zu:%d>",
- result, size, copied);
-
- (void)sdp_iocb_destroy(iocb);
- break;
- }
+ sdp_iocb_q_put_tail(&conn->r_pend, iocb);
ack = 1;
copied = 0; /* copied amount was saved in IOCB. */
@@ -1494,16 +1424,13 @@
expect);
}
- (void)sdp_buff_pool_chain_put(head, free_count);
+ sdp_buff_pool_chain_put(head, free_count);
/*
* return any peeked buffers to the recv queue, in the correct order.
*/
- if (flags & MSG_PEEK) {
- while ((buff = sdp_buff_q_get_tail(&peek_queue))) {
- expect = sdp_buff_q_put_head(&conn->recv_pool, buff);
- SDP_EXPECT(expect >= 0);
- }
- }
+ if (flags & MSG_PEEK)
+ while ((buff = sdp_buff_q_get_tail(&peek_queue)))
+ sdp_buff_q_put_head(&conn->recv_pool, buff);
sdp_conn_unlock(conn);
return ((copied > 0) ? copied : result);
Index: infiniband/ulp/sdp/sdp_advt.h
===================================================================
--- infiniband/ulp/sdp/sdp_advt.h (revision 2588)
+++ infiniband/ulp/sdp/sdp_advt.h (working copy)
@@ -50,7 +50,7 @@
struct sdpc_advt *prev; /* previous structure in table */
u32 type; /* element type. (for generic queue) */
struct sdpc_advt_q *table; /* table to which this object belongs */
- int (*release)(struct sdpc_advt *advt); /* release the object */
+ void (*release)(struct sdpc_advt *advt); /* release the object */
/*
* advertisment specific
*/
Index: infiniband/ulp/sdp/sdp_pass.c
===================================================================
--- infiniband/ulp/sdp/sdp_pass.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_pass.c (working copy)
@@ -49,7 +49,7 @@
/*
* free hello ack message
*/
- (void)sdp_buff_pool_put(sdp_buff_q_get_head(&conn->send_post));
+ sdp_buff_pool_put(sdp_buff_q_get_head(&conn->send_post));
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr) {
@@ -108,7 +108,6 @@
struct ib_qp_attr *qp_attr;
int qp_mask = 0;
int result;
- int expect;
/*
* Accept connection, build listen response headers and send
* a REP message to remote peer.
@@ -159,16 +158,7 @@
/*
* save message
*/
- result = sdp_buff_q_put(&conn->send_post, buff);
- if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> buffering hello ack packet.",
- result);
-
- expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(expect >= 0);
-
- goto error;
- }
+ sdp_buff_q_put_tail(&conn->send_post, buff);
/*
* modify QP. INIT->RTR
*/
Index: infiniband/ulp/sdp/sdp_sent.c
===================================================================
--- infiniband/ulp/sdp/sdp_sent.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_sent.c (working copy)
@@ -420,8 +420,7 @@
break;
}
- result = sdp_buff_pool_chain_put(head, free_count);
- SDP_EXPECT(result >= 0);
+ sdp_buff_pool_chain_put(head, free_count);
if (free_count <= 0 || conn->send_usig < 0) {
sdp_dbg_warn(conn,
@@ -443,8 +442,8 @@
return 0;
drop:
- (void)sdp_buff_pool_put(buff);
- (void)sdp_buff_pool_chain_put(head, free_count);
+ sdp_buff_pool_put(buff);
+ sdp_buff_pool_chain_put(head, free_count);
done:
return result;
}
Index: infiniband/ulp/sdp/sdp_iocb.c
===================================================================
--- infiniband/ulp/sdp/sdp_iocb.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_iocb.c (working copy)
@@ -485,7 +485,6 @@
static void do_iocb_complete(void *arg)
{
struct sdpc_iocb *iocb = (struct sdpc_iocb *)arg;
- int result;
long value;
/*
* release memory
@@ -511,16 +510,13 @@
/*
* delete IOCB
*/
- result = sdp_iocb_destroy(iocb);
- if (result < 0)
- sdp_dbg_err("Error <%d> deleting IOCB <%d> of status <%Zu>",
- result, iocb->key, iocb->status);
+ sdp_iocb_destroy(iocb);
}
/*
* sdp_iocb_complete - complete an IOCB
*/
-int sdp_iocb_complete(struct sdpc_iocb *iocb, ssize_t status)
+void sdp_iocb_complete(struct sdpc_iocb *iocb, ssize_t status)
{
iocb->status = status;
@@ -529,8 +525,6 @@
schedule_work(&iocb->completion);
} else
do_iocb_complete(iocb);
-
- return 0;
}
/*
@@ -607,13 +601,12 @@
/*
* sdp_iocb_destroy - destroy an IOCB object
*/
-int sdp_iocb_destroy(struct sdpc_iocb *iocb)
+void sdp_iocb_destroy(struct sdpc_iocb *iocb)
{
if (!iocb)
- return -EINVAL;
+ return;
- if (iocb->next || iocb->prev)
- return -EACCES;
+ BUG_ON(iocb->next || iocb->prev);
/*
* release iocb registered memory
*/
@@ -640,8 +633,6 @@
* return the object to its cache
*/
kmem_cache_free(sdp_iocb_cache, iocb);
-
- return 0;
}
/*
@@ -692,12 +683,15 @@
/*
* sdp_iocb_q_put - put the IOCB object at the tables tail
*/
-static int sdp_iocb_q_put(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb,
- int head)
+static void sdp_iocb_q_put(struct sdpc_iocb_q *table,
+ struct sdpc_iocb *iocb,
+ int head)
{
struct sdpc_iocb *next;
struct sdpc_iocb *prev;
+ BUG_ON(iocb->table);
+
if (!table->head) {
iocb->next = iocb;
iocb->prev = iocb;
@@ -718,8 +712,6 @@
table->size++;
iocb->table = table;
-
- return 0;
}
/*
@@ -741,17 +733,17 @@
/*
* sdp_iocb_q_put_tail - put the IOCB object at the tables tail
*/
-int sdp_iocb_q_put_tail(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
+void sdp_iocb_q_put_tail(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
{
- return sdp_iocb_q_put(table, iocb, 0);
+ sdp_iocb_q_put(table, iocb, 0);
}
/*
* sdp_iocb_q_put_head - put the IOCB object at the tables head
*/
-int sdp_iocb_q_put_head(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
+void sdp_iocb_q_put_head(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
{
- return sdp_iocb_q_put(table, iocb, 1);
+ sdp_iocb_q_put(table, iocb, 1);
}
/*
@@ -762,7 +754,6 @@
struct sdpc_iocb *iocb;
struct sdpc_iocb *next;
int counter;
- int result;
int total;
/*
@@ -780,9 +771,7 @@
iocb->post, iocb->len);
sdp_iocb_q_remove(iocb);
-
- result = sdp_iocb_complete(iocb, comp);
- SDP_EXPECT(result >= 0);
+ sdp_iocb_complete(iocb, comp);
}
iocb = next;
@@ -804,15 +793,11 @@
void sdp_iocb_q_clear(struct sdpc_iocb_q *table)
{
struct sdpc_iocb *iocb;
- int result;
-
/*
* drain the table of any objects
*/
- while ((iocb = sdp_iocb_q_get_head(table))) {
- result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(result >= 0);
- }
+ while ((iocb = sdp_iocb_q_get_head(table)))
+ sdp_iocb_destroy(iocb);
}
/*
Index: infiniband/ulp/sdp/sdp_iocb.h
===================================================================
--- infiniband/ulp/sdp/sdp_iocb.h (revision 2588)
+++ infiniband/ulp/sdp/sdp_iocb.h (working copy)
@@ -76,7 +76,7 @@
struct sdpc_iocb *prev; /* previous structure in table */
u32 type; /* element type. (for generic queue) */
struct sdpc_iocb_q *table; /* table to which this iocb belongs */
- int (*release)(struct sdpc_iocb *iocb); /* release the object */
+ void (*release)(struct sdpc_iocb *iocb); /* release the object */
/*
* iocb sepcific
*/
Index: infiniband/ulp/sdp/sdp_buff.c
===================================================================
--- infiniband/ulp/sdp/sdp_buff.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_buff.c (working copy)
@@ -84,12 +84,11 @@
/*
* do_buff_q_put - Place a buffer into a specific pool
*/
-static inline int do_buff_q_put(struct sdpc_buff_q *pool,
- struct sdpc_buff *buff, int fifo)
+static inline void do_buff_q_put(struct sdpc_buff_q *pool,
+ struct sdpc_buff *buff, int fifo)
{
/* fifo: false == tail, true == head */
- if (buff->pool)
- return -EINVAL;
+ BUG_ON(buff->pool);
if (!pool->head) {
buff->next = buff;
@@ -108,8 +107,6 @@
pool->size++;
buff->pool = pool;
-
- return 0;
}
/*
@@ -127,14 +124,13 @@
/*
* do_buff_q_remove - remove a specific buffer from a specific pool
*/
-static inline int do_buff_q_remove(struct sdpc_buff_q *pool,
- struct sdpc_buff *buff)
+static inline void do_buff_q_remove(struct sdpc_buff_q *pool,
+ struct sdpc_buff *buff)
{
struct sdpc_buff *prev;
struct sdpc_buff *next;
- if (pool != buff->pool)
- return -EINVAL;
+ BUG_ON(pool != buff->pool);
if (buff->next == buff && buff->prev == buff)
pool->head = NULL;
@@ -153,8 +149,6 @@
buff->pool = NULL;
buff->next = NULL;
buff->prev = NULL;
-
- return 0;
}
/*
@@ -167,17 +161,6 @@
}
/*
- * sdp_buff_q_remove - remove a specific buffer from a specific pool
- */
-int sdp_buff_q_remove(struct sdpc_buff *buff)
-{
- struct sdpc_buff_q *pool;
-
- pool = buff->pool;
- return do_buff_q_remove(pool, buff);
-}
-
-/*
* sdp_buff_q_get - Get a buffer from a specific pool
*/
struct sdpc_buff *sdp_buff_q_get(struct sdpc_buff_q *pool)
@@ -210,17 +193,6 @@
}
/*
- * sdp_buff_q_fetch_head - Get the pools first buffer, if the test passes
- */
-struct sdpc_buff *sdp_buff_q_fetch_head(struct sdpc_buff_q *pool,
- int (*test)(struct sdpc_buff *buff,
- void *arg),
- void *usr_arg)
-{
- return do_buff_q_get(pool, 1, test, usr_arg);
-}
-
-/*
* sdp_buff_q_fetch - Get the first matching buffer from the pool
*/
struct sdpc_buff *sdp_buff_q_fetch(struct sdpc_buff_q *pool,
@@ -243,9 +215,7 @@
counter < pool->size; counter++, buff = buff->next) {
result = test(buff, usr_arg);
if (result > 0) {
- result = do_buff_q_remove(pool, buff);
- SDP_EXPECT(result >= 0);
-
+ do_buff_q_remove(pool, buff);
return buff;
}
@@ -289,25 +259,25 @@
/*
* sdp_buff_q_put - Place a buffer into a specific pool
*/
-int sdp_buff_q_put(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
+void sdp_buff_q_put(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
{
- return do_buff_q_put(pool, buff, 1);
+ do_buff_q_put(pool, buff, 1);
}
/*
* sdp_buff_q_put_head - Place a buffer into the head of a specific pool
*/
-int sdp_buff_q_put_head(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
+void sdp_buff_q_put_head(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
{
- return do_buff_q_put(pool, buff, 1);
+ do_buff_q_put(pool, buff, 1);
}
/*
* sdp_buff_q_put_tail - Place a buffer into the tail of a specific pool
*/
-int sdp_buff_q_put_tail(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
+void sdp_buff_q_put_tail(struct sdpc_buff_q *pool, struct sdpc_buff *buff)
{
- return do_buff_q_put(pool, buff, 0);
+ do_buff_q_put(pool, buff, 0);
}
/*
@@ -316,7 +286,6 @@
void sdp_buff_q_clear_unmap(struct sdpc_buff_q *pool, struct device *dev,
int direction)
{
- int result;
struct sdpc_buff *buff;
while ((buff = do_buff_q_get(pool, 0, NULL, NULL))) {
@@ -324,10 +293,7 @@
dma_unmap_single(dev, buff->sge.addr,
buff->tail - buff->data, direction);
- result = sdp_buff_pool_put(buff);
- if (result < 0)
- sdp_dbg_err("Error <%d> returning buffer to main",
- result);
+ sdp_buff_pool_put(buff);
}
}
@@ -389,8 +355,6 @@
{
struct sdpc_buff *buff;
int total;
- int result;
-
/*
* Calculate the total number of buffers.
*/
@@ -428,14 +392,7 @@
buff->type = SDP_DESC_TYPE_BUFF;
buff->release = sdp_buff_pool_put;
- result = sdp_buff_q_put(&m_pool->pool, buff);
- if (result < 0) {
- sdp_warn("Failed to queue buffer. <%d>", result);
-
- free_page((unsigned long)buff->head);
- kmem_cache_free(m_pool->buff_cache, buff);
- break;
- }
+ sdp_buff_q_put(&m_pool->pool, buff);
m_pool->buff_cur++;
}
@@ -630,15 +587,15 @@
/*
* sdp_buff_pool_put - Return a buffer to the main buffer pool
*/
-int sdp_buff_pool_put(struct sdpc_buff *buff)
+void sdp_buff_pool_put(struct sdpc_buff *buff)
{
unsigned long flags;
- if (!buff || buff->pool)
- return -EINVAL;
+ if (!buff)
+ return;
- if (buff->next || buff->prev)
- return -ETOOMANYREFS;
+ BUG_ON(buff->pool);
+ BUG_ON(buff->next || buff->prev);
/*
* reset pointers
*/
@@ -665,8 +622,6 @@
sdp_buff_pool_release_check(main_pool);
spin_unlock_irqrestore(&main_pool->lock, flags);
-
- return 0;
}
/*
@@ -693,7 +648,7 @@
/*
* sdp_buff_pool_chain_put - Return a buffer to the main buffer pool
*/
-int sdp_buff_pool_chain_put(struct sdpc_buff *buff, u32 count)
+void sdp_buff_pool_chain_put(struct sdpc_buff *buff, u32 count)
{
unsigned long flags;
struct sdpc_buff *next;
@@ -705,7 +660,7 @@
* returned. (e.g. send completions, recv to userspace.
*/
if (!buff || count <= 0)
- return -EINVAL;
+ return;
spin_lock_irqsave(&main_pool->lock, flags);
@@ -727,8 +682,6 @@
sdp_buff_pool_release_check(main_pool);
spin_unlock_irqrestore(&main_pool->lock, flags);
-
- return 0;
}
/*
Index: infiniband/ulp/sdp/sdp_queue.c
===================================================================
--- infiniband/ulp/sdp/sdp_queue.c (revision 2588)
+++ infiniband/ulp/sdp/sdp_queue.c (working copy)
@@ -76,14 +76,13 @@
/*
* sdp_desc_q_put - Place an element into a specific table
*/
-static inline int sdp_desc_q_put(struct sdpc_desc_q *table,
+static inline void sdp_desc_q_put(struct sdpc_desc_q *table,
struct sdpc_desc *element, int fifo)
{
/*
* fifo: false == tail, true == head
*/
- if (element->table)
- return -EINVAL;
+ BUG_ON(element->table);
if (!table->head) {
element->next = element;
@@ -104,8 +103,6 @@
table->count[element->type] +=
((SDP_DESC_TYPE_NONE > element->type) ? 1 : 0);
element->table = table;
-
- return 0;
}
/*
@@ -181,17 +178,17 @@
/*
* sdp_desc_q_put_head - Place an element into the head of a table
*/
-int sdp_desc_q_put_head(struct sdpc_desc_q *table, struct sdpc_desc *element)
+void sdp_desc_q_put_head(struct sdpc_desc_q *table, struct sdpc_desc *element)
{
- return sdp_desc_q_put(table, element, 1);
+ sdp_desc_q_put(table, element, 1);
}
/*
* sdp_desc_q_put_tail - Place an element into the tail of a table
*/
-int sdp_desc_q_put_tail(struct sdpc_desc_q *table, struct sdpc_desc *element)
+void sdp_desc_q_put_tail(struct sdpc_desc_q *table, struct sdpc_desc *element)
{
- return sdp_desc_q_put(table, element, 0);
+ sdp_desc_q_put(table, element, 0);
}
/*
@@ -264,14 +261,10 @@
void sdp_desc_q_clear(struct sdpc_desc_q *table)
{
struct sdpc_desc *element;
- int result;
-
/*
* drain the table of any objects
*/
while ((element = sdp_desc_q_get_head(table)))
- if (element->release) {
- result = element->release(element);
- SDP_EXPECT(result >= 0);
- }
+ if (element->release)
+ element->release(element);
}
Index: infiniband/ulp/sdp/sdp_buff.h
===================================================================
--- infiniband/ulp/sdp/sdp_buff.h (revision 2588)
+++ infiniband/ulp/sdp/sdp_buff.h (working copy)
@@ -49,7 +49,7 @@
struct sdpc_buff *prev;
u32 type; /* element type. (for generic queue) */
struct sdpc_buff_q *pool; /* pool currently holding this buffer. */
- int (*release)(struct sdpc_buff *buff); /* release the object */
+ void (*release)(struct sdpc_buff *buff); /* release the object */
/*
* primary generic data pointers
*/
More information about the general
mailing list