[openib-general] [PATCH] sdp conditional code cleanup
Michael S. Tsirkin
mst at mellanox.co.il
Sun Feb 27 02:44:41 PST 2005
Quoting r. Libor Michalek <libor at topspin.com>:
> Subject: Re: sdp: 0 > result
>
> On Thu, Feb 24, 2005 at 11:49:28PM +0200, Michael S. Tsirkin wrote:
> > OK, now what about things like these:
> >
> > if (0 > result) {
> >
> > may I change them all to
> >
> > if (result < 0) {
> >
> > While the two are equivalent, this way we are testing the result, not 0.
> >
> > Similarly (although I feel somewhat less strongly about it)
> >
> > if (0 == result)
> > and
> > if (NULL == conn)
> >
> > are better off as
> >
> > if (!result) {
> > and
> > if (!conn)
> >
> > C is a Spartan language, and this is more brief.
> > Libor, I think I asked about the second one, but don't recall you
> > answering.
> > If OK to both, let me know and I'll do it on Sunday.
>
> I actually feel more strongly in favour of making the second change
> you propose than the first. However, I'm OK with both, so feel free
> to submit a patch.
>
> -Libor
>
Here is the patch.
I generalized the approach: every if (CONSTANT == variable) and if (CONSTANT & variable) is now
if (variable == CONSTANT) and if (variable & CONSTANT),
and so forth.
Further, some places had odd flag-testing code like this:
if ((variable & CONSTANT) > 0)
I changed them all to
if (variable & CONSTANT)
and did the same for Linux test_bit() macro usage.
While I was touching these tests, I also cleaned some of them up: there is no
reason for if (!(result > 0)) when it can be written more briefly and clearly as
if (result <= 0),
and in many cases ((!a) ? b : c) is better off as (a ? b : c).
A summary of the patterns is sketched below.
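For reference, here is a minimal stand-alone sketch of the rewrite patterns
(the function name, flag name and return values below are made up purely for
illustration and do not come from the sdp sources):

	#define SOME_FLAG 0x04

	static int cleanup_example(int result, void *conn, unsigned int flags)
	{
		/* before: if (0 > result)                after: */
		if (result < 0)
			return result;

		/* before: if (NULL == conn)              after: */
		if (!conn)
			return -1;

		/* before: if ((flags & SOME_FLAG) > 0)   after: */
		if (flags & SOME_FLAG)
			return 1;

		/* before: if (!(result > 0))             after: */
		if (result <= 0)
			return 0;

		/* before: return ((!conn) ? 0 : 1);      after: */
		return (conn ? 1 : 0);
	}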
Two things I noticed but did not fix:
A. In some places a positive error code is returned, for example
ENOBUFS rather than -ENOBUFS. I assume it's a bug but did not touch it.
B. SDP_EXPECT seems to be used with double (( )) in some places.
This is probably an artefact from when the macro itself did not
have () around the expression; see the sketch below.
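To illustrate what I mean about B (the macro definitions below are only a
hypothetical sketch, not the actual sdp_proto.h definition):

	static void expect_failed(void) { /* e.g. log a warning */ }

	/* old-style macro, no () around the argument: */
	#define SDP_EXPECT_OLD(expr) \
		do { if (!expr) expect_failed(); } while (0)
	/* SDP_EXPECT_OLD(flags & BIT) expands to if (!flags & BIT);
	 * '!' binds tighter than '&', so callers defensively wrote
	 * SDP_EXPECT_OLD((flags & BIT)). */

	/* current-style macro, () around the argument: */
	#define SDP_EXPECT(expr) \
		do { if (!(expr)) expect_failed(); } while (0)
	/* SDP_EXPECT(flags & BIT) is now safe, which leaves the extra
	 * (( )) at call sites redundant but harmless. */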
This looks like a lot of changes; I went over them several times and
everything looks in order to me. I really hope it's applied before
any other change, since it will almost surely conflict with any other
patch, and re-diffing would be a lot of work.
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Index: sdp_queue.h
===================================================================
--- sdp_queue.h (revision 1922)
+++ sdp_queue.h (working copy)
@@ -83,7 +83,7 @@ static inline int __sdp_desc_q_size(stru
*/
static inline int __sdp_desc_q_member(struct sdpc_desc *element)
{
- return ((NULL == element->table) ? 0 : 1);
+ return (element->table ? 1 : 0);
}
#define sdp_desc_q_size(x) __sdp_desc_q_size(x)
Index: sdp_write.c
===================================================================
--- sdp_write.c (revision 1922)
+++ sdp_write.c (working copy)
@@ -88,23 +88,23 @@ int sdp_event_write(struct sdp_opt *conn
switch (type) {
case SDP_DESC_TYPE_BUFF:
buff = (struct sdpc_buff *) sdp_desc_q_get_head(&conn->w_snk);
- SDP_EXPECT((NULL != buff));
+ SDP_EXPECT((buff));
conn->send_qud -= buff->data_size;
result = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
break;
case SDP_DESC_TYPE_IOCB:
iocb = (struct sdpc_iocb *)sdp_desc_q_look_head(&conn->w_snk);
- if (NULL == iocb || iocb->wrid != comp->wr_id) {
+ if (!iocb || iocb->wrid != comp->wr_id) {
break;
}
iocb = (struct sdpc_iocb *)sdp_desc_q_get_head(&conn->w_snk);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
iocb->flags &= ~(SDP_IOCB_F_ACTIVE | SDP_IOCB_F_RDMA_W);
@@ -112,7 +112,7 @@ int sdp_event_write(struct sdp_opt *conn
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto error;
@@ -122,7 +122,7 @@ int sdp_event_write(struct sdp_opt *conn
case SDP_DESC_TYPE_NONE:
iocb = (struct sdpc_iocb *)sdp_desc_q_look_type_head(&conn->send_queue,
SDP_DESC_TYPE_IOCB);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn,
"No IOCB on write complete <%llu:%d:%d>",
(unsigned long long)comp->wr_id,
@@ -158,7 +158,7 @@ int sdp_event_write(struct sdp_opt *conn
* of some more sends.
*/
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing send queue.", result);
goto error;
}
@@ -167,7 +167,7 @@ int sdp_event_write(struct sdp_opt *conn
* reads.
*/
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing recv queue.", result);
goto error;
}
Index: sdp_link.c
===================================================================
--- sdp_link.c (revision 1922)
+++ sdp_link.c (working copy)
@@ -47,7 +47,7 @@ static struct work_struct _link_ti
static u64 _path_lookup_id = 0;
#define _SDP_PATH_LOOKUP_ID() \
- ((0 == ++_path_lookup_id) ? ++_path_lookup_id : _path_lookup_id)
+ ((++_path_lookup_id) ? _path_lookup_id : ++_path_lookup_id)
#define GID_FMT "%x:%x:%x:%x:%x:%x:%x:%x"
#define GID_ARG(gid) be16_to_cpup((__be16 *) ((gid).raw + 0)), \
@@ -105,7 +105,7 @@ static void _sdp_path_wait_add(struct sd
info->wait_list = wait;
wait->pext = &info->wait_list;
- if (NULL != wait->next)
+ if (wait->next)
wait->next->pext = &wait->next;
}
@@ -117,8 +117,8 @@ static void _sdp_path_wait_destroy(struc
/*
* if it's in the list, pext will not be null
*/
- if (NULL != wait->pext) {
- if (NULL != wait->next)
+ if (wait->pext) {
+ if (wait->next)
wait->next->pext = wait->pext;
*(wait->pext) = wait->next;
@@ -153,7 +153,7 @@ static struct sdp_path_info *_sdp_path_i
{
struct sdp_path_info *info;
- for (info = _info_list; NULL != info; info = info->next)
+ for (info = _info_list; info; info = info->next)
if (dst_ip == info->dst && dev_if == info->dif)
break;
@@ -168,7 +168,7 @@ static struct sdp_path_info *_sdp_path_i
struct sdp_path_info *info;
info = kmem_cache_alloc(_info_cache, SLAB_KERNEL);
- if (NULL == info)
+ if (!info)
return NULL;
memset(info, 0, sizeof(struct sdp_path_info));
@@ -177,7 +177,7 @@ static struct sdp_path_info *_sdp_path_i
_info_list = info;
info->pext = &_info_list;
- if (NULL != info->next)
+ if (info->next)
info->next->pext = &info->next;
info->dst = dst_ip;
@@ -200,8 +200,8 @@ static void _sdp_path_info_destroy(struc
/*
* if it's in the list, pext will not be null
*/
- if (NULL != info->pext) {
- if (NULL != info->next)
+ if (info->pext) {
+ if (info->next)
info->next->pext = info->pext;
*(info->pext) = info->next;
@@ -210,7 +210,7 @@ static void _sdp_path_info_destroy(struc
info->next = NULL;
}
- while (NULL != (wait = info->wait_list))
+ while ((wait = info->wait_list))
_sdp_path_wait_complete(wait, info, status);
cancel_delayed_work(&info->timer);
@@ -244,13 +244,13 @@ static void _sdp_link_path_rec_done(int
}
sweep = info->wait_list;
- while (NULL != sweep) {
+ while (sweep) {
wait = sweep;
sweep = sweep->next;
/*
* on timeout increment retries.
*/
- if (-ETIMEDOUT == status)
+ if (status == -ETIMEDOUT)
wait->retry++;
if (!status || SDP_LINK_SA_RETRY < wait->retry)
@@ -259,7 +259,7 @@ static void _sdp_link_path_rec_done(int
/*
* retry if anyone is waiting.
*/
- if (NULL != info->wait_list) {
+ if (info->wait_list) {
info->sa_time = min(info->sa_time * 2, SDP_LINK_SA_TIME_MAX);
result = ib_sa_path_rec_get(info->ca,
@@ -275,7 +275,7 @@ static void _sdp_link_path_rec_done(int
info,
&info->query);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(NULL, "Error <%d> restarting path query",
result);
_sdp_path_info_destroy(info, result);
@@ -294,7 +294,7 @@ static int _sdp_link_path_rec_get(struct
GID_ARG(info->path.sgid),
GID_ARG(info->path.dgid));
- if (SDP_LINK_F_PATH & info->flags)
+ if (info->flags & SDP_LINK_F_PATH)
return 0;
result = ib_sa_path_rec_get(info->ca,
@@ -309,7 +309,7 @@ static int _sdp_link_path_rec_get(struct
_sdp_link_path_rec_done,
info,
&info->query);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(NULL, "Error <%d> starting path record query",
result);
info->query = NULL;
@@ -351,16 +351,16 @@ void _sdp_link_path_lookup(void *data)
/*
* path request in progress?
*/
- if (NULL != info->query)
+ if (info->query)
goto done;
/*
* route information present, but no path query, goto re-arp.
*/
- if (NULL != info->ca)
+ if (info->ca)
goto arp;
result = ip_route_output_key(&rt, &fl);
- if (0 > result || NULL == rt) {
+ if (result < 0 || !rt) {
sdp_dbg_warn(NULL, "Error <%d> routing <%08x:%08x> (%d)",
result, info->dst, info->src, info->dif);
goto error;
@@ -368,7 +368,7 @@ void _sdp_link_path_lookup(void *data)
/*
* check route flags
*/
- if (0 < ((RTCF_MULTICAST|RTCF_BROADCAST) & rt->rt_flags)) {
+ if ((RTCF_MULTICAST|RTCF_BROADCAST) & rt->rt_flags) {
ip_rt_put(rt);
result = -ENETUNREACH;
goto error;
@@ -376,7 +376,7 @@ void _sdp_link_path_lookup(void *data)
/*
* check that device is IPoIB
*/
- if (NULL == rt->u.dst.neighbour || NULL == rt->u.dst.neighbour->dev) {
+ if (!rt->u.dst.neighbour || !rt->u.dst.neighbour->dev) {
sdp_dbg_warn(NULL, "No neighbour found for <%08x:%08x>",
rt->rt_src, rt->rt_dst);
@@ -388,7 +388,7 @@ void _sdp_link_path_lookup(void *data)
* handling.
*/
if (ARPHRD_INFINIBAND != rt->u.dst.neighbour->dev->type &&
- 0 == (IFF_LOOPBACK & rt->u.dst.neighbour->dev->flags)) {
+ !(rt->u.dst.neighbour->dev->flags & IFF_LOOPBACK)) {
result = -ENETUNREACH;
goto error;
}
@@ -402,22 +402,22 @@ void _sdp_link_path_lookup(void *data)
* In case of loopback find a valid IB device on which to
* direct the loopback traffic.
*/
- info->dev = ((IFF_LOOPBACK & rt->u.dst.neighbour->dev->flags) ?
+ info->dev = ((rt->u.dst.neighbour->dev->flags & IFF_LOOPBACK) ?
ip_dev_find(rt->rt_src) : rt->u.dst.neighbour->dev);
info->gw = rt->rt_gateway;
info->src = rt->rt_src; /* true source IP address */
- if (IFF_LOOPBACK & info->dev->flags)
- while (NULL != (info->dev = dev_get_by_index(++counter))) {
+ if (info->dev->flags & IFF_LOOPBACK)
+ while ((info->dev = dev_get_by_index(++counter))) {
dev_put(info->dev);
if (ARPHRD_INFINIBAND == info->dev->type &&
- 0 < (IFF_UP & info->dev->flags))
+ (info->dev->flags & IFF_UP))
break;
}
- if (NULL == info->dev) {
+ if (!info->dev) {
sdp_dbg_warn(NULL, "No device for IB comm <%s:%08x:%08x>",
rt->u.dst.neighbour->dev->name,
rt->u.dst.neighbour->dev->flags,
@@ -442,7 +442,7 @@ void _sdp_link_path_lookup(void *data)
* If the routing device is loopback save the device address of
* the IB device which was found.
*/
- if (IFF_LOOPBACK & rt->u.dst.neighbour->dev->flags) {
+ if (rt->u.dst.neighbour->dev->flags & IFF_LOOPBACK) {
memcpy(&info->path.dgid,
(info->dev->dev_addr + 4),
sizeof(union ib_gid));
@@ -478,12 +478,12 @@ arp:
/*
* start arp timer if it's not already going.
*/
- if (SDP_LINK_F_ARP & info->flags) {
+ if (info->flags & SDP_LINK_F_ARP) {
struct sdp_path_wait *sweep;
struct sdp_path_wait *wait;
sweep = info->wait_list;
- while (NULL != sweep) {
+ while (sweep) {
wait = sweep;
sweep = sweep->next;
@@ -492,7 +492,7 @@ arp:
-ETIMEDOUT);
}
- if (NULL == info->wait_list) {
+ if (!info->wait_list) {
result = -ETIMEDOUT;
goto error;
}
@@ -547,9 +547,9 @@ int sdp_link_path_lookup(u32 dst_addr,
* lookup entry, create if not found and add to wait list.
*/
info = _sdp_path_info_lookup(dst_addr, bound_dev_if);
- if (NULL == info) {
+ if (!info) {
info = _sdp_path_info_create(dst_addr, bound_dev_if);
- if (NULL == info) {
+ if (!info) {
sdp_dbg_warn(NULL, "Failed to create path object");
return -ENOMEM;
}
@@ -560,7 +560,7 @@ int sdp_link_path_lookup(u32 dst_addr,
/*
* if not waiting for result, complete.
*/
- if (SDP_LINK_F_VALID & info->flags) {
+ if (info->flags & SDP_LINK_F_VALID) {
_sdp_link_path_complete(*id, 0, info, completion, arg);
return 0;
}
@@ -568,7 +568,7 @@ int sdp_link_path_lookup(u32 dst_addr,
* add request to list of lookups.
*/
wait = kmem_cache_alloc(_wait_cache, SLAB_KERNEL);
- if (NULL == wait) {
+ if (!wait) {
sdp_dbg_warn(NULL, "Failed to create path wait object");
result = -ENOMEM;
goto error;
@@ -603,7 +603,7 @@ static void _sdp_link_sweep(void *data)
struct sdp_path_info *sweep;
sweep = _info_list;
- while (NULL != sweep) {
+ while (sweep) {
info = sweep;
sweep = sweep->next;
@@ -649,16 +649,16 @@ static void _sdp_link_arp_work(void *dat
/*
* find a path info structure for the source IP address.
*/
- for (info = _info_list; NULL != info; info = info->next)
+ for (info = _info_list; info; info = info->next)
if (info->dst == arp->src_ip)
break;
- if (NULL == info)
+ if (!info)
goto done;
/*
* update record info, and request new path record data.
*/
- if (SDP_LINK_F_ARP & info->flags) {
+ if (info->flags & SDP_LINK_F_ARP) {
cancel_delayed_work(&info->timer);
info->flags &= ~SDP_LINK_F_ARP;
}
@@ -696,7 +696,7 @@ static int _sdp_link_arp_recv(struct sk_
* queue IB arp packet onto work queue.
*/
work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (NULL == work)
+ if (!work)
goto done;
work->arg = skb;
@@ -734,7 +734,7 @@ int sdp_link_addr_init(void)
_tsSdpAsyncEventFunc,
NULL,
&_tsSdpAsyncHandle);
- if (0 != result) {
+ if (result) {
goto error_async;
}
@@ -743,7 +743,7 @@ int sdp_link_addr_init(void)
sizeof(struct sdp_path_info),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == _info_cache) {
+ if (!_info_cache) {
sdp_warn("Failed to allocate path info cache.");
result = -ENOMEM;
@@ -754,7 +754,7 @@ int sdp_link_addr_init(void)
sizeof(struct sdp_path_wait),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == _wait_cache) {
+ if (!_wait_cache) {
sdp_warn("Failed to allocate path wait cache.");
result = -ENOMEM;
@@ -762,7 +762,7 @@ int sdp_link_addr_init(void)
}
_link_wq = create_workqueue("sdp_wq");
- if (NULL == _link_wq) {
+ if (!_link_wq) {
sdp_warn("Failed to allocate ARP wait queue.");
result = -ENOMEM;
@@ -798,7 +798,7 @@ int sdp_link_addr_cleanup(void)
/*
* clear objects
*/
- while (NULL != (info = _info_list))
+ while ((info = _info_list))
_sdp_path_info_destroy(info, -EINTR);
/*
* remove ARP packet processing.
Index: sdp_rcvd.c
===================================================================
--- sdp_rcvd.c (revision 1922)
+++ sdp_rcvd.c (working copy)
@@ -54,7 +54,7 @@ static int _sdp_rcvd_disconnect(struct s
* initiate disconnect to framework
*/
result = sdp_wall_recv_close(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> closing connection",
result);
goto error;
@@ -79,7 +79,7 @@ static int _sdp_rcvd_disconnect(struct s
* disconnect)
*/
result = sdp_wall_recv_closing(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> confirming conn close",
result);
goto error;
@@ -92,10 +92,10 @@ static int _sdp_rcvd_disconnect(struct s
* if the remote DREQ was already received, but unprocessed,
* do not treat it as an error
*/
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> CM disconnect", result);
- if (-EPROTO != result)
+ if (result != -EPROTO)
goto error;
}
@@ -131,7 +131,7 @@ static int _sdp_rcvd_abort(struct sdp_op
case SDP_CONN_ST_DIS_SEND_2:
case SDP_CONN_ST_DIS_SENT_2:
result = sdp_wall_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> during abort", result);
break;
@@ -163,19 +163,19 @@ static int _sdp_rcvd_send_sm(struct sdp_
sdp_dbg_ctrl(conn, "SendSM msg. active <%d> count <%d> flags <%08x>",
conn->src_sent, conn->src_cncl, conn->flags);
- if (0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags) &&
+ if ((conn->flags & SDP_CONN_F_SRC_CANCEL_L) &&
SDP_WRAP_GTE(buff->bsdh_hdr->seq_ack, conn->src_cseq)) {
/*
* drain the active source queue
*/
- while (NULL != (iocb = sdp_iocb_q_get_tail(&conn->w_src))) {
- SDP_EXPECT((0 < (SDP_IOCB_F_ACTIVE & iocb->flags)));
- SDP_EXPECT((0 < (SDP_IOCB_F_CANCEL & iocb->flags)));
+ while ((iocb = sdp_iocb_q_get_tail(&conn->w_src))) {
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_ACTIVE));
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_CANCEL));
conn->src_sent--;
result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
/*
* Cancel complete, clear the state.
@@ -201,13 +201,13 @@ static int _sdp_rcvd_rdma_wr(struct sdp_
* lookup active IOCB read.
*/
iocb = sdp_iocb_q_look(&conn->r_snk);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "Cannot find IOCB for Write Completion.");
result = -EPROTO;
goto error;
}
- SDP_EXPECT((0 < (SDP_IOCB_F_RDMA_W & iocb->flags)));
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_W));
sdp_dbg_data(conn, "Write <%d> size <%d:%d:%Zu> mode <%d> active <%d>",
iocb->key, rwch->size, iocb->len, iocb->size,
@@ -225,7 +225,7 @@ static int _sdp_rcvd_rdma_wr(struct sdp_
* Iocb is done, deregister memory, and generate completion.
*/
iocb = sdp_iocb_q_get_head(&conn->r_snk);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
conn->snk_sent--;
@@ -237,7 +237,7 @@ static int _sdp_rcvd_rdma_wr(struct sdp_
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto error;
@@ -262,7 +262,7 @@ static int _sdp_rcvd_rdma_rd(struct sdp_
* lookup IOCB read.
*/
iocb = sdp_iocb_q_look(&conn->w_src);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "Cannot find IOCB for Read Completion.");
result = -EPROTO;
goto error;
@@ -289,19 +289,19 @@ static int _sdp_rcvd_rdma_rd(struct sdp_
* it on the first successful RDMA to make sure we don't get a
* false positive of data sent. (specification ambiguity/pain)
*/
- iocb->post += (0 == iocb->post) ? (iocb->size - iocb->len) : 0;
+ iocb->post += iocb->post ? 0 : (iocb->size - iocb->len);
iocb->len -= rrch->size;
iocb->post += rrch->size;
conn->send_pipe -= rrch->size;
- conn->oob_offset -= (0 < conn->oob_offset) ? rrch->size : 0;
+ conn->oob_offset -= (conn->oob_offset > 0) ? rrch->size : 0;
/*
* If iocb is done, deregister memory, and generate completion.
*/
- if (!(0 < iocb->len)) {
+ if (iocb->len <= 0) {
iocb = sdp_iocb_q_get_head(&conn->w_src);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
conn->src_sent--;
@@ -309,7 +309,7 @@ static int _sdp_rcvd_rdma_rd(struct sdp_
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto error;
@@ -319,8 +319,8 @@ static int _sdp_rcvd_rdma_rd(struct sdp_
* If Source Cancel was in process, and there are no more outstanding
* advertisments, then it should now be cleared.
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags) &&
- 0 == sdp_iocb_q_size(&conn->w_src)) {
+ if ((conn->flags & SDP_CONN_F_SRC_CANCEL_L) &&
+ !sdp_iocb_q_size(&conn->w_src)) {
conn->src_cncl = 0;
conn->flags &= ~(SDP_CONN_F_SRC_CANCEL_L);
}
@@ -347,7 +347,7 @@ static int _sdp_rcvd_mode_change(struct
* Check if the mode change is to the same mode.
*/
if (((SDP_MSG_MCH_GET_MODE(mch) & 0x7) ==
- ((0 < (SDP_MSG_MCH_GET_MODE(mch) & 0x8)) ?
+ ((SDP_MSG_MCH_GET_MODE(mch) & 0x8) ?
conn->send_mode : conn->recv_mode))) {
sdp_dbg_warn(conn, "Mode transition <%d> is a nop. <%d:%d>",
SDP_MSG_MCH_GET_MODE(mch), conn->recv_mode,
@@ -360,12 +360,12 @@ static int _sdp_rcvd_mode_change(struct
*/
switch (SDP_MSG_MCH_GET_MODE(mch)) {
case SDP_MSG_MCH_BUFF_RECV: /* source to sink */
- if (SDP_MODE_COMB != conn->recv_mode) {
+ if (conn->recv_mode != SDP_MODE_COMB) {
result = -EPROTO;
goto mode_error;
}
- if (0 < conn->src_recv) {
+ if (conn->src_recv > 0) {
sdp_dbg_warn(conn, "mode error <%d> src pending <%d>",
SDP_MSG_MCH_GET_MODE(mch),
conn->src_recv);
@@ -375,21 +375,21 @@ static int _sdp_rcvd_mode_change(struct
break;
case SDP_MSG_MCH_COMB_SEND: /* sink to source */
- if (SDP_MODE_BUFF != conn->send_mode) {
+ if (conn->send_mode != SDP_MODE_BUFF) {
result = -EPROTO;
goto mode_error;
}
break;
case SDP_MSG_MCH_PIPE_RECV: /* source to sink */
- if (SDP_MODE_COMB != conn->recv_mode) {
+ if (conn->recv_mode != SDP_MODE_COMB) {
result = -EPROTO;
goto mode_error;
}
break;
case SDP_MSG_MCH_COMB_RECV: /* source to sink */
- if (SDP_MODE_PIPE != conn->recv_mode) {
+ if (conn->recv_mode != SDP_MODE_PIPE) {
result = -EPROTO;
goto mode_error;
}
@@ -411,7 +411,7 @@ static int _sdp_rcvd_mode_change(struct
/*
* assign new mode
*/
- if (0 < (SDP_MSG_MCH_GET_MODE(mch) & 0x8))
+ if (SDP_MSG_MCH_GET_MODE(mch) & 0x8)
conn->send_mode = SDP_MSG_MCH_GET_MODE(mch) & 0x7;
else
conn->recv_mode = SDP_MSG_MCH_GET_MODE(mch) & 0x7;
@@ -438,7 +438,7 @@ static int _sdp_rcvd_src_cancel(struct s
* If there are no outstanding advertisments, then there is nothing
* to do.
*/
- if (!(0 < conn->src_recv)) {
+ if (conn->src_recv <= 0) {
sdp_dbg_warn(conn, "No SrcAvail advertisments to cancel.");
result = 0;
goto done;
@@ -448,12 +448,12 @@ static int _sdp_rcvd_src_cancel(struct s
* if it's already processed data.
*/
advt = sdp_advt_q_look(&conn->src_pend);
- if (NULL != advt && 0 < advt->post) {
+ if (advt && advt->post > 0) {
/*
* If active, move to the active queue. Otherwise generate an
* immediate completion
*/
- if (0 < (SDP_ADVT_F_READ & advt->flag)) {
+ if (advt->flag & SDP_ADVT_F_READ) {
sdp_advt_q_put(&conn->src_actv,
sdp_advt_q_get(&conn->src_pend));
@@ -463,7 +463,7 @@ static int _sdp_rcvd_src_cancel(struct s
conn->flags |= SDP_CONN_F_SRC_CANCEL_C;
} else {
result = sdp_send_ctrl_rdma_rd(conn, advt->post);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> read completion",
result);
@@ -474,19 +474,19 @@ static int _sdp_rcvd_src_cancel(struct s
/*
* drop the pending advertisment queue.
*/
- while (NULL != (advt = sdp_advt_q_get(&conn->src_pend))) {
+ while ((advt = sdp_advt_q_get(&conn->src_pend))) {
conn->flags |= SDP_CONN_F_SRC_CANCEL_C;
conn->src_recv--;
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
/*
* If there are active reads, mark the connection as being in
* source cancel. Otherwise
*/
- if (0 < sdp_advt_q_size(&conn->src_actv)) {
+ if (sdp_advt_q_size(&conn->src_actv) > 0) {
/*
* Set flag. Adjust sequence number ack. (spec dosn't want the
* seq ack in subsequent messages updated until the cancel has
@@ -499,9 +499,9 @@ static int _sdp_rcvd_src_cancel(struct s
/*
* If a source was dropped, generate an ack.
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_C & conn->flags)) {
+ if (conn->flags & SDP_CONN_F_SRC_CANCEL_C) {
result = sdp_send_ctrl_send_sm(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error<%d> posting SendSm",
result);
goto done;
@@ -528,7 +528,7 @@ static int _sdp_rcvd_snk_cancel(struct s
* If there are no outstanding advertisments, they we've completed
* since the message was sent, and there is nothing to do.
*/
- if (!(0 < conn->snk_recv)) {
+ if (conn->snk_recv <= 0) {
sdp_dbg_warn(conn, "No SnkAvail advertisments to cancel.");
result = 0;
goto done;
@@ -540,12 +540,12 @@ static int _sdp_rcvd_snk_cancel(struct s
* it's possible to handle the entire Cancel here.
*/
advt = sdp_advt_q_look(&conn->snk_pend);
- if (NULL != advt && 0 < advt->post) {
+ if (advt && advt->post > 0) {
/*
* Generate completion
*/
result = sdp_send_ctrl_rdma_wr(conn, advt->post);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> write completion",
result);
goto done;
@@ -562,20 +562,20 @@ static int _sdp_rcvd_snk_cancel(struct s
/*
* drain the advertisments which have yet to be processed.
*/
- while (NULL != (advt = sdp_advt_q_get(&conn->snk_pend))) {
+ while ((advt = sdp_advt_q_get(&conn->snk_pend))) {
counter++;
conn->snk_recv--;
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
/*
* A cancel ack is sent only if we cancelled an advertisment without
* sending a completion
*/
- if (0 < counter) {
+ if (counter > 0) {
result = sdp_send_ctrl_snk_cancel_ack(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> SnkCacelAck response",
result);
goto done;
@@ -599,7 +599,7 @@ static int _sdp_rcvd_snk_cancel_ack(stru
sdp_dbg_ctrl(conn, "Sink Cancel Ack. actv <%d> mode <%d> flag <%08x>",
conn->snk_sent, conn->recv_mode, conn->flags);
- if (0 == (SDP_CONN_F_SNK_CANCEL & conn->flags)) {
+ if (!(conn->flags & SDP_CONN_F_SNK_CANCEL)) {
sdp_dbg_warn(conn, "Connection not in sink cancel mode <%08x>",
conn->flags);
result = -EPROTO;
@@ -608,11 +608,11 @@ static int _sdp_rcvd_snk_cancel_ack(stru
/*
* drain and complete all active IOCBs
*/
- while (NULL != (iocb = sdp_iocb_q_get_head(&conn->r_snk))) {
+ while ((iocb = sdp_iocb_q_get_head(&conn->r_snk))) {
conn->snk_sent--;
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto done;
@@ -648,7 +648,7 @@ static int _sdp_rcvd_resize_buff_ack(str
crbh->size, conn->recv_size);
result = sdp_send_ctrl_resize_buff_ack(conn, conn->recv_size);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> acking size change request",
result);
goto error;
@@ -695,7 +695,7 @@ static int _sdp_rcvd_snk_avail(struct sd
/*
* check our send mode, and make sure parameters are within reason.
*/
- if (SDP_MODE_PIPE != conn->send_mode) {
+ if (conn->send_mode != SDP_MODE_PIPE) {
sdp_dbg_warn(conn, "SinkAvail, incorrect source mode <%d>",
conn->send_mode);
result = -EPROTO;
@@ -727,9 +727,9 @@ static int _sdp_rcvd_snk_avail(struct sd
* If there are outstanding SrcAvail messages, they are now
* invalid and the queue needs to be fixed up.
*/
- if (0 < conn->src_sent) {
- while (NULL != (iocb = sdp_iocb_q_get_tail(&conn->w_src))) {
- SDP_EXPECT((0 < (SDP_IOCB_F_ACTIVE & iocb->flags)));
+ if (conn->src_sent > 0) {
+ while ((iocb = sdp_iocb_q_get_tail(&conn->w_src))) {
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_ACTIVE));
iocb->flags &= ~SDP_IOCB_F_ACTIVE;
conn->src_sent--;
@@ -739,21 +739,21 @@ static int _sdp_rcvd_snk_avail(struct sd
* in cancel processing they need to be
* completed.
*/
- if (0 == (SDP_IOCB_F_CANCEL & iocb->flags)) {
+ if (!(iocb->flags & SDP_IOCB_F_CANCEL)) {
result = sdp_desc_q_put_head(&conn->send_queue,
(struct sdpc_desc *)
iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
} else {
result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
/*
* If Source Cancel was in process, it should now
* be cleared.
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags)) {
+ if (conn->flags & SDP_CONN_F_SRC_CANCEL_L) {
conn->src_cncl = 0;
conn->flags &= ~(SDP_CONN_F_SRC_CANCEL_L);
}
@@ -762,7 +762,7 @@ static int _sdp_rcvd_snk_avail(struct sd
* create and queue new advertisment
*/
advt = sdp_advt_create();
- if (NULL == advt) {
+ if (!advt) {
sdp_dbg_warn(conn, "SrcAvail cannot be copied.");
result = -ENOMEM;
goto error;
@@ -786,9 +786,9 @@ consume:
/*
* consume any data in the advertisment for the other direction.
*/
- if (0 < (buff->tail - buff->data)) {
+ if ((buff->tail - buff->data) > 0) {
result = sdp_recv_buff(conn, buff);
- if (0 < result)
+ if (result > 0)
/*
* count number of bytes buffered by the connection,
* zero byte buffers or errors can be returned, the
@@ -796,7 +796,7 @@ consume:
*/
conn->byte_strm += result;
else
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> buffer recv",
result);
} else
@@ -828,7 +828,7 @@ static int _sdp_rcvd_src_avail(struct sd
srcah->size, srcah->r_key, size,
(unsigned long long) srcah->addr, conn->recv_mode);
- if (0 < conn->snk_sent) {
+ if (conn->snk_sent > 0) {
/*
* crossed SrcAvail and SnkAvail, the source message is
* discarded.
@@ -839,7 +839,7 @@ static int _sdp_rcvd_src_avail(struct sd
goto done;
}
- if (0 < (SDP_CONN_F_SRC_CANCEL_R & conn->flags)) {
+ if (conn->flags & SDP_CONN_F_SRC_CANCEL_R) {
sdp_dbg_warn(conn, "SrcAvail during SrcAvailCancel. <%d>",
conn->src_recv);
result = -EFAULT;
@@ -850,14 +850,14 @@ static int _sdp_rcvd_src_avail(struct sd
* if data is received and the receive half of the connection has been
* closed. This notifies the peer that the data was not received.
*/
- if (0 < (RCV_SHUTDOWN & conn->shutdown)) {
+ if (RCV_SHUTDOWN & conn->shutdown) {
sdp_dbg_warn(conn, "SrcAvail, receive path closed <%02x>",
conn->shutdown);
/*
* abort connection (send reset)
*/
result = sdp_wall_abort(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* drop packet
*/
@@ -868,7 +868,7 @@ static int _sdp_rcvd_src_avail(struct sd
* save the advertisment
*/
advt = sdp_advt_create();
- if (NULL == advt) {
+ if (!advt) {
sdp_dbg_warn(conn, "SrcAvail cannot be copied.");
result = -ENOMEM;
goto done;
@@ -886,8 +886,8 @@ static int _sdp_rcvd_src_avail(struct sd
break;
case SDP_MODE_COMB:
- if (0 < conn->src_recv ||
- !(0 < size) ||
+ if (conn->src_recv > 0 ||
+ size <= 0 ||
!(srcah->size > size)) {
sdp_dbg_warn(conn,
"SrcAvail mode <%d> mismatch. <%d:%d:%d>",
@@ -910,7 +910,7 @@ static int _sdp_rcvd_src_avail(struct sd
break;
case SDP_MODE_PIPE:
if (SDP_MSG_MAX_ADVS == (conn->src_recv + conn->snk_recv) ||
- 0 != size) {
+ size) {
sdp_dbg_warn(conn,
"SrcAvail mode <%d> mismatch. <%d:%d>",
conn->recv_mode,
@@ -941,7 +941,7 @@ static int _sdp_rcvd_src_avail(struct sd
/*
* process any ULP data in the message
*/
- if (0 == size) {
+ if (!size) {
result = 0;
goto done;
}
@@ -952,7 +952,7 @@ static int _sdp_rcvd_src_avail(struct sd
if (!(SDP_SRC_AVAIL_RECV > size)) {
result = sdp_recv_buff(conn, buff);
- if (0 < result)
+ if (result > 0)
/*
* count number of bytes buffered by the
* connection, zero byte buffers or errors
@@ -961,7 +961,7 @@ static int _sdp_rcvd_src_avail(struct sd
*/
conn->byte_strm += result;
else
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> buffer recv",
result);
} else
@@ -990,7 +990,7 @@ static int _sdp_rcvd_data(struct sdp_opt
* If we are processing a SrcAvail, there should be no
* buffered data
*/
- if (0 < conn->src_recv) {
+ if (conn->src_recv > 0) {
sdp_dbg_warn(conn, "Error, recv'd data with SrcAvail active.");
return -EPROTO;
}
@@ -1010,7 +1010,7 @@ static int _sdp_rcvd_data(struct sdp_opt
conn->nond_recv++;
ret_val = sdp_recv_buff(conn, buff);
- if (0 > ret_val)
+ if (ret_val < 0)
sdp_dbg_warn(conn, "Error <%d> processing buff recv", ret_val);
/*
* result contains the number of bytes in the buffer which
@@ -1122,7 +1122,7 @@ int sdp_event_recv(struct sdp_opt *conn,
* get data
*/
buff = sdp_buff_q_get_head(&conn->recv_post);
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "receive event, but no posted receive?!");
result = -EINVAL;
goto done;
@@ -1167,7 +1167,7 @@ int sdp_event_recv(struct sdp_opt *conn,
* SrcAvailCancel message has been processed.
*/
conn->recv_seq = buff->bsdh_hdr->seq_num;
- conn->advt_seq = (((SDP_CONN_F_SRC_CANCEL_R & conn->flags) > 0) ?
+ conn->advt_seq = ((conn->flags & SDP_CONN_F_SRC_CANCEL_R) ?
conn->advt_seq : conn->recv_seq);
/*
* buffers advertised minus the difference in buffer count between
@@ -1194,8 +1194,8 @@ int sdp_event_recv(struct sdp_opt *conn,
else {
offset = buff->bsdh_hdr->mid & 0x1F;
- if (!(offset < SDP_MSG_EVENT_TABLE_SIZE) ||
- NULL == recv_event_funcs[offset]) {
+ if (offset >= SDP_MSG_EVENT_TABLE_SIZE ||
+ !recv_event_funcs[offset]) {
sdp_dbg_warn(conn, "receive event, unknown MID <%d>",
buff->bsdh_hdr->mid);
result = -EINVAL;
@@ -1210,22 +1210,22 @@ int sdp_event_recv(struct sdp_opt *conn,
/*
* process result.
*/
- if (0 == result) {
+ if (!result) {
result = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* If this buffer was consumed, then make sure sufficient
* recv buffers are posted. Also we might be able to move
* data with a new RDMA SrcAvail advertisment.
*/
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing recv queue",
result);
goto done;
}
} else
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"receive event, dispatch error. <%d>",
result);
@@ -1242,7 +1242,7 @@ int sdp_event_recv(struct sdp_opt *conn,
* recv window and we can flush buffered send data
*/
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing send queue",
result);
goto done;
Index: sdp_inet.c
===================================================================
--- sdp_inet.c (revision 1922)
+++ sdp_inet.c (working copy)
@@ -103,17 +103,17 @@ void sdp_inet_wake_send(struct sock *sk)
{
struct sdp_opt *conn;
- if (NULL == sk || NULL == (conn = SDP_GET_CONN(sk)))
+ if (!sk || !(conn = SDP_GET_CONN(sk)))
return;
- if (NULL != sk->sk_socket &&
- 0 < test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
- 0 < __sdp_inet_writable(conn)) {
+ if (sk->sk_socket &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
+ __sdp_inet_writable(conn)) {
read_lock(&sk->sk_callback_lock);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- if (NULL != sk->sk_sleep &&
- 0 < waitqueue_active(sk->sk_sleep))
+ if (sk->sk_sleep &&
+ waitqueue_active(sk->sk_sleep))
wake_up_interruptible(sk->sk_sleep);
/*
* test, clear, and notify. SOCK_ASYNC_NOSPACE
@@ -128,10 +128,10 @@ void sdp_inet_wake_send(struct sock *sk)
*/
void sdp_inet_wake_generic(struct sock *sk)
{
- if (NULL != sk) {
+ if (sk) {
read_lock(&sk->sk_callback_lock);
- if (NULL != sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
wake_up_interruptible_all(sk->sk_sleep);
read_unlock(&sk->sk_callback_lock);
@@ -143,9 +143,9 @@ void sdp_inet_wake_generic(struct sock *
*/
void sdp_inet_wake_recv(struct sock *sk, int len)
{
- if (NULL != sk) {
+ if (sk) {
read_lock(&sk->sk_callback_lock);
- if (NULL != sk->sk_sleep)
+ if (sk->sk_sleep)
wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, 1, POLL_IN);
@@ -158,9 +158,9 @@ void sdp_inet_wake_recv(struct sock *sk,
*/
void sdp_inet_wake_error(struct sock *sk)
{
- if (NULL != sk) {
+ if (sk) {
read_lock(&sk->sk_callback_lock);
- if (NULL != sk->sk_sleep)
+ if (sk->sk_sleep)
wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, 0, POLL_ERR);
@@ -177,7 +177,7 @@ void sdp_inet_wake_urg(struct sock *sk)
* pid for SIGURG/SIGIO has been set. On positive send signal to
* process, on negative send signal to processes group.
*/
- if (NULL != sk)
+ if (sk)
sk_send_sigurg(sk);
}
@@ -205,7 +205,7 @@ static int _sdp_inet_abort(struct sdp_op
case SDP_SOCK_ST_DISCONNECT:
case SDP_SOCK_ST_CLOSING:
result = sdp_wall_abort(conn);
- if (0 > result) {
+ if (result < 0) {
result = -ECONNABORTED;
SDP_CONN_SET_ERR(conn, ECONNABORTED);
@@ -242,7 +242,7 @@ static int _sdp_inet_disconnect(struct s
switch (conn->istate) {
case SDP_SOCK_ST_CONNECT:
result = sdp_wall_abort(conn);
- if (0 > result) {
+ if (result < 0) {
result = -ECONNABORTED;
SDP_CONN_SET_ERR(conn, ECONNABORTED);
conn->istate = SDP_SOCK_ST_ERROR;
@@ -252,7 +252,7 @@ static int _sdp_inet_disconnect(struct s
case SDP_SOCK_ST_ACCEPTED:
conn->istate = SDP_SOCK_ST_DISCONNECT;
result = sdp_wall_send_close(conn);
- if (0 > result) {
+ if (result < 0) {
result = -ECONNABORTED;
SDP_CONN_SET_ERR(conn, ECONNABORTED);
conn->istate = SDP_SOCK_ST_ERROR;
@@ -261,7 +261,7 @@ static int _sdp_inet_disconnect(struct s
case SDP_SOCK_ST_CLOSE:
conn->istate = SDP_SOCK_ST_CLOSING;
result = sdp_wall_send_closing(conn);
- if (0 > result) {
+ if (result < 0) {
result = -ECONNABORTED;
SDP_CONN_SET_ERR(conn, ECONNABORTED);
conn->istate = SDP_SOCK_ST_ERROR;
@@ -306,7 +306,7 @@ static int _sdp_inet_release(struct sock
long timeout;
u32 flags;
- if (NULL == sock->sk) {
+ if (!sock->sk) {
sdp_dbg_warn(NULL, "release empty <%d:%d> flags <%08lx>",
sock->type, sock->state, sock->flags);
return 0;
@@ -326,12 +326,12 @@ static int _sdp_inet_release(struct sock
sdp_conn_lock(conn);
conn->shutdown = SHUTDOWN_MASK;
- if (SDP_SOCK_ST_LISTEN == conn->istate) {
+ if (conn->istate == SDP_SOCK_ST_LISTEN) {
/*
* stop listening
*/
result = sdp_inet_listen_stop(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> while releasing listen",
result);
@@ -341,7 +341,7 @@ static int _sdp_inet_release(struct sock
* get blocking nature of the socket.
*/
if (sock->file)
- flags = (0 < (sock->file->f_flags & O_NONBLOCK)) ? \
+ flags = (sock->file->f_flags & O_NONBLOCK) ? \
MSG_DONTWAIT : 0;
else
flags = 0;
@@ -350,15 +350,15 @@ static int _sdp_inet_release(struct sock
* and consider this an abort. Otherwise consider
* this a gracefull close.
*/
- if (0 < sdp_buff_q_size(&conn->recv_pool) ||
- 0 < conn->src_recv ||
- (0 < sock_flag(sk, SOCK_LINGER) &&
- 0 == sk->sk_lingertime)) {
+ if (sdp_buff_q_size(&conn->recv_pool) > 0 ||
+ conn->src_recv > 0 ||
+ (sock_flag(sk, SOCK_LINGER) &&
+ !sk->sk_lingertime)) {
/*
* abort.
*/
result = _sdp_inet_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> while aborting socket",
result);
@@ -368,7 +368,7 @@ static int _sdp_inet_release(struct sock
* disconnect. (state dependant)
*/
result = _sdp_inet_disconnect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> while disconnecting socket",
result);
goto done;
@@ -377,8 +377,8 @@ static int _sdp_inet_release(struct sock
* Skip lingering/canceling if
* non-blocking and not exiting.
*/
- if (0 == (MSG_DONTWAIT & flags) ||
- 0 < (PF_EXITING & current->flags)) {
+ if (!(MSG_DONTWAIT & flags) ||
+ (PF_EXITING & current->flags)) {
/*
* Wait if linger is set and
* process is not exiting.
@@ -391,8 +391,8 @@ static int _sdp_inet_release(struct sock
add_wait_queue(sk->sk_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while (0 < timeout &&
- 0 == (SDP_ST_MASK_CLOSED & conn->istate)) {
+ while (timeout > 0 &&
+ !(SDP_ST_MASK_CLOSED & conn->istate)) {
sdp_conn_unlock(conn);
timeout = schedule_timeout(timeout);
sdp_conn_lock(conn);
@@ -410,18 +410,18 @@ static int _sdp_inet_release(struct sock
* Cancel write and close again to force closing the
* connection.
*/
- if (0 < (SDP_ST_MASK_DRAIN & conn->istate)) {
+ if (SDP_ST_MASK_DRAIN & conn->istate) {
sdp_iocb_q_cancel_all_write(conn, -ECANCELED);
result = _sdp_inet_disconnect(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
#endif
}
done:
- if (0 < (SDP_ST_MASK_CLOSED & conn->istate))
+ if (SDP_ST_MASK_CLOSED & conn->istate)
do {} while(0); /* pass */
/*
@@ -479,15 +479,15 @@ static int _sdp_inet_bind(struct socket
* check bind permission for low ports.
*/
bind_port = ntohs(addr->sin_port);
- if (0 < bind_port &&
- bind_port < PROT_SOCK && 0 == capable(CAP_NET_BIND_SERVICE))
+ if (bind_port > 0 &&
+ bind_port < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
/*
* socket checks.
*/
sdp_conn_lock(conn);
- if (SDP_SOCK_ST_CLOSED != conn->istate || 0 < conn->src_port) {
+ if (conn->istate != SDP_SOCK_ST_CLOSED || conn->src_port > 0) {
result = -EINVAL;
goto done;
}
@@ -498,7 +498,7 @@ static int _sdp_inet_bind(struct socket
conn->src_addr = 0;
result = sdp_inet_port_get(conn, bind_port);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> getting port during bind",
result);
@@ -510,7 +510,7 @@ static int _sdp_inet_bind(struct socket
if (INADDR_ANY != conn->src_addr)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
- if (0 < bind_port)
+ if (bind_port > 0)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
inet_sk(sk)->rcv_saddr = htonl(conn->src_addr);
@@ -566,14 +566,14 @@ static int _sdp_inet_connect(struct sock
switch (sock->state) {
case SS_UNCONNECTED:
- if (0 == (SDP_ST_MASK_CLOSED & conn->istate)) {
+ if (!(SDP_ST_MASK_CLOSED & conn->istate)) {
result = -EISCONN;
goto done;
}
- if (0 == conn->src_port) {
+ if (!conn->src_port) {
result = sdp_inet_port_get(conn, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> getting port",
result);
goto done;
@@ -598,7 +598,7 @@ static int _sdp_inet_connect(struct sock
* post the SDP hello message
*/
result = sdp_cm_connect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> initiating connect",
result);
@@ -630,13 +630,13 @@ static int _sdp_inet_connect(struct sock
* wait for connection to complete.
*/
timeout = sock_sndtimeo(sk, (O_NONBLOCK & flags));
- if (0 < timeout) {
+ if (timeout > 0) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(sk->sk_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while (0 < timeout && SDP_SOCK_ST_CONNECT == conn->istate) {
+ while (timeout > 0 && conn->istate == SDP_SOCK_ST_CONNECT) {
sdp_conn_unlock(conn);
timeout = schedule_timeout(timeout);
@@ -651,9 +651,9 @@ static int _sdp_inet_connect(struct sock
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
- if (SDP_SOCK_ST_CONNECT == conn->istate) {
+ if (conn->istate == SDP_SOCK_ST_CONNECT) {
- if (0 < timeout) {
+ if (timeout > 0) {
result = sock_intr_errno(timeout);
}
@@ -711,23 +711,23 @@ static int _sdp_inet_listen(struct socke
sdp_conn_lock(conn);
if (SS_UNCONNECTED != sock->state ||
- (SDP_SOCK_ST_CLOSED != conn->istate &&
- SDP_SOCK_ST_LISTEN != conn->istate)) {
+ (conn->istate != SDP_SOCK_ST_CLOSED &&
+ conn->istate != SDP_SOCK_ST_LISTEN)) {
result = -EINVAL;
goto done;
}
- if (SDP_SOCK_ST_LISTEN != conn->istate) {
+ if (conn->istate != SDP_SOCK_ST_LISTEN) {
result = sdp_inet_listen_start(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> starting listen",
result);
goto done;
}
- if (0 == conn->src_port) {
+ if (!conn->src_port) {
result = sdp_inet_port_get(conn, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> getting port",
result);
goto done;
@@ -772,7 +772,7 @@ static int _sdp_inet_accept(struct socke
sdp_conn_lock(listen_conn);
- if (SDP_SOCK_ST_LISTEN != listen_conn->istate) {
+ if (listen_conn->istate != SDP_SOCK_ST_LISTEN) {
result = -EINVAL;
goto listen_done;
}
@@ -782,19 +782,19 @@ static int _sdp_inet_accept(struct socke
* if there is no socket on the queue, wait for one. It' done in a
* loop in case there is a problem with the first socket we hit.
*/
- while (NULL == accept_conn) {
+ while (!accept_conn) {
/*
* No pending socket wait.
*/
accept_conn = sdp_inet_accept_q_get(listen_conn);
- if (NULL == accept_conn) {
+ if (!accept_conn) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(listen_sk->sk_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while (0 < timeout &&
- SDP_SOCK_ST_LISTEN == listen_conn->istate &&
- 0 == listen_conn->backlog_cnt) {
+ while (timeout > 0 &&
+ listen_conn->istate == SDP_SOCK_ST_LISTEN &&
+ !listen_conn->backlog_cnt) {
sdp_conn_unlock(listen_conn);
timeout = schedule_timeout(timeout);
sdp_conn_lock(listen_conn);
@@ -808,14 +808,14 @@ static int _sdp_inet_accept(struct socke
/*
* process result
*/
- if (0 == listen_conn->backlog_cnt) {
+ if (!listen_conn->backlog_cnt) {
result = 0;
- if (SDP_SOCK_ST_LISTEN != listen_conn->istate)
+ if (listen_conn->istate != SDP_SOCK_ST_LISTEN)
result = -EINVAL;
if (signal_pending(current))
result = sock_intr_errno(timeout);
- if (0 == timeout)
+ if (!timeout)
result = -EAGAIN;
goto listen_done;
@@ -880,7 +880,7 @@ static int _sdp_inet_accept(struct socke
break;
}
- if (NULL != accept_conn)
+ if (accept_conn)
/*
* Connections returned from the AcceptQueue
* are holding their lock, before returning
@@ -898,12 +898,11 @@ listen_done:
sdp_dbg_ctrl(listen_conn,
"ACCEPT: complete <%d> <%08x:%04x><%08x:%04x>",
- (NULL == accept_conn ?
- SDP_DEV_SK_INVALID : accept_conn->hashent),
- (NULL == accept_sk ? 0 : accept_conn->src_addr),
- (NULL == accept_sk ? 0 : accept_conn->src_port),
- (NULL == accept_sk ? 0 : accept_conn->dst_addr),
- (NULL == accept_sk ? 0 : accept_conn->dst_port));
+ (accept_conn ? accept_conn->hashent : SDP_DEV_SK_INVALID),
+ (accept_sk ? accept_conn->src_addr : 0),
+ (accept_sk ? accept_conn->src_port : 0),
+ (accept_sk ? accept_conn->dst_addr : 0),
+ (accept_sk ? accept_conn->dst_port : 0));
return result;
}
@@ -926,9 +925,9 @@ static int _sdp_inet_getname(struct sock
conn->dst_addr, conn->dst_port);
addr->sin_family = _proto_family;
- if (0 < peer)
- if (0 < htons(conn->dst_port) &&
- 0 == (SDP_ST_MASK_CLOSED & conn->istate)) {
+ if (peer > 0)
+ if (htons(conn->dst_port) > 0 &&
+ !(SDP_ST_MASK_CLOSED & conn->istate)) {
addr->sin_port = htons(conn->dst_port);
addr->sin_addr.s_addr = htonl(conn->dst_addr);
@@ -977,7 +976,7 @@ static unsigned int _sdp_inet_poll(struc
*/
switch (conn->istate) {
case SDP_SOCK_ST_LISTEN:
- mask |= (0 < conn->backlog_cnt) ? (POLLIN | POLLRDNORM) : 0;
+ mask |= (conn->backlog_cnt > 0) ? (POLLIN | POLLRDNORM) : 0;
break;
case SDP_SOCK_ST_ERROR:
mask |= POLLERR;
@@ -994,7 +993,7 @@ static unsigned int _sdp_inet_poll(struc
* recv EOF _and_ recv data
*/
if (!(conn->byte_strm < sk->sk_rcvlowat) ||
- 0 < (RCV_SHUTDOWN & conn->shutdown))
+ (RCV_SHUTDOWN & conn->shutdown))
mask |= POLLIN | POLLRDNORM;
/*
@@ -1002,7 +1001,7 @@ static unsigned int _sdp_inet_poll(struc
* (Some poll() Linux documentation says that POLLHUP is
* incompatible with the POLLOUT/POLLWR flags)
*/
- if (0 < (SEND_SHUTDOWN & conn->shutdown))
+ if (SEND_SHUTDOWN & conn->shutdown)
mask |= POLLHUP;
else {
/*
@@ -1014,7 +1013,7 @@ static unsigned int _sdp_inet_poll(struc
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
set_bit(SOCK_NOSPACE, &sock->flags);
- if (0 < __sdp_inet_writable(conn)) {
+ if (__sdp_inet_writable(conn)) {
mask |= POLLOUT | POLLWRNORM;
clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
@@ -1022,7 +1021,7 @@ static unsigned int _sdp_inet_poll(struc
}
}
- if (0 < conn->rcv_urg_cnt)
+ if (conn->rcv_urg_cnt > 0)
mask |= POLLPRI;
}
@@ -1058,7 +1057,7 @@ static int _sdp_inet_ioctl(struct socket
* standard INET IOCTLs
*/
case SIOCGSTAMP:
- if (0 == sk->sk_stamp.tv_sec)
+ if (!sk->sk_stamp.tv_sec)
result = -ENOENT;
else {
result = copy_to_user((void __user *)arg,
@@ -1110,7 +1109,7 @@ static int _sdp_inet_ioctl(struct socket
case SIOCINQ:
sdp_conn_lock(conn);
- if (SDP_SOCK_ST_LISTEN != conn->istate) {
+ if (conn->istate != SDP_SOCK_ST_LISTEN) {
/*
* TODO need to subtract/add URG (inline vs. OOB)
*/
@@ -1124,7 +1123,7 @@ static int _sdp_inet_ioctl(struct socket
case SIOCOUTQ:
sdp_conn_lock(conn);
- if (SDP_SOCK_ST_LISTEN != conn->istate) {
+ if (conn->istate != SDP_SOCK_ST_LISTEN) {
value = conn->send_qud;
result = put_user(value, (int __user *) arg);
} else
@@ -1137,10 +1136,10 @@ static int _sdp_inet_ioctl(struct socket
value = 0;
- if (0 < conn->rcv_urg_cnt) {
+ if (conn->rcv_urg_cnt > 0) {
buff = sdp_buff_q_look_head(&conn->recv_pool);
- if (NULL != buff &&
- 0 < (SDP_BUFF_F_OOB_PRES & buff->flags) &&
+ if (buff &&
+ (buff->flags & SDP_BUFF_F_OOB_PRES) &&
1 == (buff->tail - buff->data))
value = 1;
}
@@ -1185,11 +1184,11 @@ static int _sdp_inet_setopt(struct socke
switch (optname) {
case TCP_NODELAY:
- conn->nodelay = (0 == value) ? 0 : 1;
+ conn->nodelay = value ? 1 : 0;
- if (0 < conn->nodelay) {
+ if (conn->nodelay > 0) {
result = sdp_send_flush(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
break;
@@ -1302,7 +1301,7 @@ static int _sdp_inet_shutdown(struct soc
* 1 - send shutdown
* 2 - send/recv shutdown.
*/
- if (0 > flag || 2 < flag)
+ if (flag < 0 || flag > 2)
return -EINVAL;
else
flag++; /* match shutdown mask. */
@@ -1319,9 +1318,9 @@ static int _sdp_inet_shutdown(struct soc
/*
* Send shutdown is benign.
*/
- if (0 < (RCV_SHUTDOWN & flag)) {
+ if (RCV_SHUTDOWN & flag) {
result = sdp_inet_listen_stop(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "listen stop error <%d>",
result);
}
@@ -1334,14 +1333,14 @@ static int _sdp_inet_shutdown(struct soc
case SDP_SOCK_ST_ACCEPTED:
case SDP_SOCK_ST_CONNECT:
result = _sdp_inet_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> aborting connection",
result);
break;
case SDP_SOCK_ST_ESTABLISHED:
case SDP_SOCK_ST_CLOSE:
result = _sdp_inet_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> disconnecting conn",
result);
break;
@@ -1368,7 +1367,7 @@ static int _sdp_inet_shutdown(struct soc
result = -EFAULT;
}
- if (0 > result)
+ if (result < 0)
sdp_inet_wake_generic(sock->sk);
else
sdp_inet_wake_error(sock->sk);
@@ -1420,7 +1419,7 @@ static int _sdp_inet_create(struct socke
}
conn = sdp_conn_alloc(GFP_KERNEL);
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(conn, "SOCKET: failed to create socekt <%d:%d>",
sock->type, protocol);
return -ENOMEM;
@@ -1470,7 +1469,7 @@ static int __init sdp_init(void)
* proc entries
*/
result = sdp_main_proc_init();
- if (0 > result) {
+ if (result < 0) {
sdp_warn("INIT: Error <%d> creating proc entries.", result);
goto error_proc;
}
@@ -1478,7 +1477,7 @@ static int __init sdp_init(void)
* advertisment table
*/
result = sdp_main_advt_init();
- if (0 > result) {
+ if (result < 0) {
sdp_warn("INIT: Error <%d> initializing advertisments",
result);
goto error_advt;
@@ -1487,7 +1486,7 @@ static int __init sdp_init(void)
* link data
*/
result = sdp_link_addr_init();
- if (0 > result) {
+ if (result < 0) {
sdp_warn("INIT: Error <%d> initializing link",
result);
goto error_link;
@@ -1499,7 +1498,7 @@ static int __init sdp_init(void)
_buff_max,
_alloc_inc,
_free_mark);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("INIT: Error <%d> initializing buffer pool.", result);
goto error_buff;
}
@@ -1513,7 +1512,7 @@ static int __init sdp_init(void)
_send_post_max,
_send_buff_max,
_send_usig_max);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("INIT: Error <%d> initializing connection table.",
result);
goto error_conn;
Index: sdp_proto.h
===================================================================
--- sdp_proto.h (revision 1922)
+++ sdp_proto.h (working copy)
@@ -487,7 +487,7 @@ extern int sdp_debug_level;
#define __sdp_conn_dbg(level, type, conn, format, arg...) \
do { \
struct sdp_opt *x = (conn); \
- if (NULL != x) { \
+ if (x) { \
__sdp_dbg_out(level, type, \
"<%d> <%04x:%04x> " format, \
x->hashent, x->istate, x->state, \
@@ -572,7 +572,7 @@ static inline int __sdp_inet_write_space
* write space is determined by amount of outstanding bytes of data
* and number of buffers used for transmission by this connection
*/
- if (0 < (SDP_ST_MASK_OPEN & conn->istate) &&
+ if ((SDP_ST_MASK_OPEN & conn->istate) &&
(conn->send_max >
sdp_desc_q_types_size(&conn->send_queue,
SDP_DESC_TYPE_BUFF)))
@@ -586,7 +586,7 @@ static inline int __sdp_inet_write_space
*/
static inline int __sdp_inet_writable(struct sdp_opt *conn)
{
- if (0 < (SDP_ST_MASK_OPEN & conn->istate))
+ if (SDP_ST_MASK_OPEN & conn->istate)
return (__sdp_inet_write_space(conn, 0) <
(conn->send_qud / 2)) ? 0 : 1;
else
@@ -605,8 +605,8 @@ static inline int __sdp_conn_stat_dump(s
conn->src_serv, conn->snk_serv);
for (counter = 0; counter < 0x20; counter++)
- if (0 < conn->send_mid[counter] ||
- 0 < conn->recv_mid[counter]) {
+ if (conn->send_mid[counter] > 0 ||
+ conn->recv_mid[counter] > 0) {
sdp_dbg_init("STAT: MID send <%02x> <%u>", counter,
conn->send_mid[counter]);
sdp_dbg_init("STAT: MID recv <%02x> <%u>", counter,
@@ -626,7 +626,7 @@ static inline int __sdp_conn_state_dump(
sdp_dbg_init("STATE: Connection <%04x> state:", conn->hashent);
- if (SDP_CONN_ST_INVALID == conn->state_rec.state[0]) {
+ if (conn->state_rec.state[0] == SDP_CONN_ST_INVALID) {
sdp_dbg_init("STATE: No state history. <%d>",
conn->state_rec.value);
return 0;
Index: sdp_read.c
===================================================================
--- sdp_read.c (revision 1922)
+++ sdp_read.c (working copy)
@@ -53,38 +53,38 @@ static int _sdp_event_read_advt(struct s
* what MS does... (Either choice is correct)
*/
advt = sdp_advt_q_look(&conn->src_actv);
- if (NULL == advt || advt->wrid != comp->wr_id) {
+ if (!advt || advt->wrid != comp->wr_id) {
advt = sdp_advt_q_look(&conn->src_pend);
- if (NULL != advt && advt->wrid == comp->wr_id)
+ if (advt && advt->wrid == comp->wr_id)
advt->flag &= ~SDP_ADVT_F_READ;
goto done;
}
advt = sdp_advt_q_get(&conn->src_actv);
- SDP_EXPECT((NULL != advt));
+ SDP_EXPECT((advt));
conn->src_recv--;
result = sdp_send_ctrl_rdma_rd(conn, advt->post);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* If a SrcAvailCancel was received, and all RDMA reads
* have been flushed, perform tail processing
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_R & conn->flags) &&
- 0 == conn->src_recv) {
+ if ((conn->flags & SDP_CONN_F_SRC_CANCEL_R) &&
+ !conn->src_recv) {
conn->flags &= ~SDP_CONN_F_SRC_CANCEL_R;
conn->advt_seq = conn->recv_seq;
/*
* If any data was canceled, post a SendSm, also
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_C & conn->flags)) {
+ if (conn->flags & SDP_CONN_F_SRC_CANCEL_C) {
result = sdp_send_ctrl_send_sm(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting SendSm",
result);
goto error;
@@ -162,7 +162,7 @@ int sdp_event_read(struct sdp_opt *conn,
switch (type) {
case SDP_DESC_TYPE_BUFF:
buff = (struct sdpc_buff *) sdp_desc_q_get_head(&conn->r_src);
- SDP_EXPECT((NULL != buff));
+ SDP_EXPECT((buff));
if (comp->wr_id != buff->wrid) {
sdp_dbg_warn(conn, "work request mismatch <%llu:%llu>",
@@ -177,7 +177,7 @@ int sdp_event_read(struct sdp_opt *conn,
* post data to the stream interface
*/
result = sdp_recv_buff(conn, buff);
- if (0 < result) {
+ if (result > 0) {
/*
* count number of bytes buffered by the connection,
* zero byte buffers can be returned. If data was
@@ -187,22 +187,22 @@ int sdp_event_read(struct sdp_opt *conn,
sdp_inet_wake_recv(conn->sk, conn->byte_strm);
} else {
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> receiving buff",
result);
result = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
break;
case SDP_DESC_TYPE_IOCB:
iocb = (struct sdpc_iocb *) sdp_desc_q_look_head(&conn->r_src);
- if (NULL == iocb || iocb->wrid != comp->wr_id)
+ if (!iocb || iocb->wrid != comp->wr_id)
break;
iocb = (struct sdpc_iocb *)sdp_desc_q_get_head(&conn->r_src);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
iocb->flags &= ~(SDP_IOCB_F_ACTIVE | SDP_IOCB_F_RDMA_R);
@@ -210,7 +210,7 @@ int sdp_event_read(struct sdp_opt *conn,
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto done;
@@ -219,7 +219,7 @@ int sdp_event_read(struct sdp_opt *conn,
break;
case SDP_DESC_TYPE_NONE:
iocb = sdp_iocb_q_look(&conn->r_pend);
- if (NULL == iocb) {
+ if (!iocb) {
result = -EPROTO;
goto done;
}
@@ -233,13 +233,13 @@ int sdp_event_read(struct sdp_opt *conn,
break;
iocb = sdp_iocb_q_get_head(&conn->r_pend);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
goto done;
@@ -256,7 +256,7 @@ int sdp_event_read(struct sdp_opt *conn,
* The advertisment which generated this READ needs to be checked.
*/
result = _sdp_event_read_advt(conn, comp);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> handling READ advertisment",
result);
goto done;
@@ -266,7 +266,7 @@ int sdp_event_read(struct sdp_opt *conn,
* of some RDMAs
*/
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing send queue.", result);
goto done;
}
@@ -275,7 +275,7 @@ int sdp_event_read(struct sdp_opt *conn,
* reads.
*/
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing recv queue.", result);
goto done;
}
Index: sdp_msgs.h
===================================================================
--- sdp_msgs.h (revision 1922)
+++ sdp_msgs.h (working copy)
@@ -68,7 +68,7 @@
/*
* message type masks
*/
-#define SDP_MID_CTRL(mid) ((0 < (0xF0 & mid)) ? 0 : 1)
+#define SDP_MID_CTRL(mid) ((0xF0 & mid) ? 0 : 1)
/*
* Base Sockets Direct Header (header for all SDP messages)
*/
Index: sdp_send.c
===================================================================
--- sdp_send.c (revision 1922)
+++ sdp_send.c (working copy)
@@ -54,7 +54,7 @@ static int _sdp_send_buff_post(struct sd
conn->s_wq_size++;
conn->l_advt_bf = conn->l_recv_bf;
conn->send_pipe -= buff->data_size;
- conn->oob_offset -= (0 < conn->oob_offset) ? buff->data_size : 0;
+ conn->oob_offset -= (conn->oob_offset > 0) ? buff->data_size : 0;
buff->wrid = conn->send_wrid++;
buff->lkey = conn->l_key;
@@ -81,8 +81,7 @@ static int _sdp_send_buff_post(struct sd
* the flag. This allows for at least one pending urgent message
* to send early notification.
*/
- if (0 < (SDP_CONN_F_OOB_SEND & conn->flags) &&
- !(0xFFFF < conn->oob_offset)) {
+ if ((conn->flags & SDP_CONN_F_OOB_SEND) && conn->oob_offset <= 0xFFFF) {
SDP_BSDH_SET_OOB_PEND(buff->bsdh_hdr);
SDP_BUFF_F_SET_SE(buff);
@@ -93,11 +92,11 @@ static int _sdp_send_buff_post(struct sd
* and present flag is set, potentially OOB offset is cleared. pending
* is set if this buffer has never had pending set.
*/
- if (0 < (SDP_BUFF_F_OOB_PRES & buff->flags)) {
- if (0 < conn->oob_offset)
+ if (buff->flags & SDP_BUFF_F_OOB_PRES) {
+ if (conn->oob_offset > 0)
SDP_BSDH_SET_OOB_PEND(buff->bsdh_hdr);
else {
- SDP_EXPECT(!(0 > conn->oob_offset));
+ SDP_EXPECT(conn->oob_offset >= 0);
conn->oob_offset = -1;
}
@@ -107,12 +106,12 @@ static int _sdp_send_buff_post(struct sd
/*
* solicite event bit.
*/
- if (0 < SDP_BUFF_F_GET_SE(buff))
+ if (SDP_BUFF_F_GET_SE(buff))
send_param.send_flags |= IB_SEND_SOLICITED;
/*
* unsignalled event
*/
- if (0 < SDP_BUFF_F_GET_UNSIG(buff) &&
+ if (SDP_BUFF_F_GET_UNSIG(buff) &&
conn->usig_max > conn->send_cons) {
conn->send_usig++;
conn->send_cons++;
@@ -124,13 +123,13 @@ static int _sdp_send_buff_post(struct sd
/*
* check queue membership. (first send attempt vs. flush)
*/
- if (0 < sdp_desc_q_member((struct sdpc_desc *) buff))
+ if (sdp_desc_q_member((struct sdpc_desc *) buff) > 0)
sdp_desc_q_remove((struct sdpc_desc *) buff);
/*
* save the buffer for the event handler.
*/
result = sdp_buff_q_put_tail(&conn->send_post, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing send buffer", result);
goto done;
}
@@ -149,7 +148,7 @@ static int _sdp_send_buff_post(struct sd
send_param.opcode = IB_WR_SEND;
result = ib_post_send(conn->qp, &send_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn,
"Error <%d> posting send. <%d:%d> <%d:%d:%d>",
result, conn->s_wq_cur, conn->s_wq_size,
@@ -164,11 +163,9 @@ static int _sdp_send_buff_post(struct sd
* source cancels require us to save the sequence number
* for validation of the cancel's completion.
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags))
- conn->src_cseq =
- ((SDP_MID_SRC_CANCEL ==
- buff->bsdh_hdr->mid) ? buff->bsdh_hdr->seq_num : conn->
- src_cseq);
+ if (conn->flags & SDP_CONN_F_SRC_CANCEL_L)
+ conn->src_cseq = ((buff->bsdh_hdr->mid == SDP_MID_SRC_CANCEL) ?
+ buff->bsdh_hdr->seq_num : conn->src_cseq);
return 0;
done:
@@ -199,17 +196,17 @@ static int _sdp_send_data_buff_post(stru
* 3) buffer from head of queue or as parameter
* 4) nodelay check.
*/
- if (3 > conn->r_recv_bf || 0 < conn->src_sent)
+ if (conn->r_recv_bf < 3 || conn->src_sent > 0)
return ENOBUFS;
/*
* The rest of the checks can proceed if there is a signalled event
* in the pipe, otherwise we could stall...
*/
if (conn->send_usig < sdp_buff_q_size(&conn->send_post) ||
- 0 < sdp_desc_q_size(&conn->w_snk)) {
+ sdp_desc_q_size(&conn->w_snk) > 0) {
if (buff->tail < buff->end &&
- 0 == (SDP_BUFF_F_OOB_PRES & buff->flags) &&
- 0 == conn->nodelay)
+ !(buff->flags & SDP_BUFF_F_OOB_PRES) &&
+ !conn->nodelay)
/*
* If the buffer is not full, and there is already
* data in the SDP pipe, then hold on to the buffer
@@ -223,7 +220,7 @@ static int _sdp_send_data_buff_post(stru
* asymmetric connections. This is desirable to offload
* the remote host.
*/
- if (!(conn->s_wq_cur > conn->s_wq_size)) {
+ if (conn->s_wq_cur <= conn->s_wq_size) {
/*
* slow down the up take in the send data path to
* give the remote side some time to post available
@@ -257,7 +254,7 @@ static int _sdp_send_data_buff_post(stru
* update non-discard counter.
* Make consideration for a pending sink. (can be forced by OOB)
*/
- if (0 < sdp_advt_q_size(&conn->snk_pend)) {
+ if (sdp_advt_q_size(&conn->snk_pend) > 0) {
/*
* As sink advertisment needs to be discarded. We always
* complete an advertisment if there is not enough room
@@ -268,10 +265,10 @@ static int _sdp_send_data_buff_post(stru
* then the buffer.)
*/
advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((NULL != advt));
+ SDP_EXPECT((advt));
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* update sink advertisments.
*/
@@ -282,7 +279,7 @@ static int _sdp_send_data_buff_post(stru
* transmision time
*/
result = _sdp_send_buff_post(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting send data buffer",
result);
goto error;
@@ -310,19 +307,19 @@ static int _sdp_send_data_buff_snk(struc
*
* 1) sufficient send resources.
*/
- if (!(conn->send_cq_size > conn->s_wq_size))
+ if (conn->send_cq_size <= conn->s_wq_size)
return ENOBUFS;
/*
* confirm type
*/
- if (SDP_DESC_TYPE_BUFF != buff->type)
+ if (buff->type != SDP_DESC_TYPE_BUFF)
return -ENOBUFS;
/*
* nodelay buffering
*/
#if 0
if (buff->tail < buff->end &&
- 0 == conn->nodelay &&
+ !conn->nodelay &&
conn->send_usig < sdp_buff_q_size(&conn->send_post)) {
/*
* If the buffer is not full, and there is already data in the
@@ -337,7 +334,7 @@ static int _sdp_send_data_buff_snk(struc
* get advertisment.
*/
advt = sdp_advt_q_look(&conn->snk_pend);
- if (NULL == advt)
+ if (!advt)
return ENOBUFS;
/*
* signalled? With no delay turned off, data transmission may be
@@ -369,11 +366,11 @@ static int _sdp_send_data_buff_snk(struc
/*
* dequeue if needed and the queue buffer
*/
- if (0 < sdp_desc_q_member((struct sdpc_desc *) buff))
+ if (sdp_desc_q_member((struct sdpc_desc *) buff) > 0)
sdp_desc_q_remove((struct sdpc_desc *) buff);
result = sdp_desc_q_put_tail(&conn->w_snk, (struct sdpc_desc *)buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing write buffer. <%d>",
result, sdp_desc_q_size(&conn->w_snk));
goto error;
@@ -383,7 +380,7 @@ static int _sdp_send_data_buff_snk(struc
*/
conn->s_wq_size++;
conn->send_pipe -= buff->data_size;
- conn->oob_offset -= (0 < conn->oob_offset) ? buff->data_size : 0;
+ conn->oob_offset -= (conn->oob_offset > 0) ? buff->data_size : 0;
/*
* post RDMA
*/
@@ -393,7 +390,7 @@ static int _sdp_send_data_buff_snk(struc
send_param.num_sge = 1;
result = ib_post_send(conn->qp, &send_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> posting rdma write", result);
conn->s_wq_size--;
@@ -405,15 +402,15 @@ static int _sdp_send_data_buff_snk(struc
*/
if (conn->send_size > advt->size) {
advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((NULL != advt));
+ SDP_EXPECT((advt));
zcopy = advt->post;
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = sdp_send_ctrl_rdma_wr(conn, zcopy);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing sink. <%d>",
result, zcopy);
result = -ENODEV;
@@ -449,7 +446,7 @@ static int _sdp_send_data_iocb_snk(struc
* to fill the advertisement
*/
result = sdp_iocb_register(iocb, conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> registering IOCB. <%d:%d>",
result, iocb->key, iocb->len);
goto error;
@@ -457,12 +454,12 @@ static int _sdp_send_data_iocb_snk(struc
/*
* check queue depth
*/
- while (0 < iocb->len && conn->send_cq_size > conn->s_wq_size) {
+ while (iocb->len > 0 && conn->send_cq_size > conn->s_wq_size) {
/*
* get the pending sink advertisment.
*/
advt = sdp_advt_q_look(&conn->snk_pend);
- if (NULL == advt)
+ if (!advt)
break;
/*
* amount of data to zcopy.
@@ -499,7 +496,7 @@ static int _sdp_send_data_iocb_snk(struc
*/
conn->s_wq_size++;
conn->send_pipe -= zcopy;
- conn->oob_offset -= (0 < conn->oob_offset) ? zcopy : 0;
+ conn->oob_offset -= (conn->oob_offset > 0) ? zcopy : 0;
/*
* post RDMA
*/
@@ -509,7 +506,7 @@ static int _sdp_send_data_iocb_snk(struc
send_param.num_sge = 1;
result = ib_post_send(conn->qp, &send_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> posting rdma write",
result);
@@ -523,11 +520,11 @@ static int _sdp_send_data_iocb_snk(struc
* outstanding sink advertisment to see how the advt
* size remaining is picked.)
*/
- if (!(conn->send_size > advt->size))
+ if (conn->send_size <= advt->size)
continue;
advt = sdp_advt_q_get(&conn->snk_pend);
- if (NULL == advt) {
+ if (!advt) {
sdp_dbg_warn(conn, "sink advertisment disappeared.");
result = -ENODEV;
goto error;
@@ -536,10 +533,10 @@ static int _sdp_send_data_iocb_snk(struc
zcopy = advt->post;
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = sdp_send_ctrl_rdma_wr(conn, zcopy);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> completing sink. <%d>",
result, zcopy);
result = -ENODEV;
@@ -570,16 +567,16 @@ static int _sdp_send_data_iocb_src(struc
* 1) local source cancel is pending
* 2) sufficient send credits for buffered transmission.
*/
- if (0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags) || 3 > conn->r_recv_bf)
+ if ((conn->flags & SDP_CONN_F_SRC_CANCEL_L) || conn->r_recv_bf < 3)
return ENOBUFS;
switch (conn->send_mode) {
case SDP_MODE_PIPE:
- if (!(conn->s_cur_adv > conn->src_sent))
+ if (conn->s_cur_adv <= conn->src_sent)
return ENOBUFS;
if (conn->s_cur_adv < conn->r_max_adv) {
- if (!(SDP_SEND_POST_FRACTION > conn->s_par_adv)) {
+ if (conn->s_par_adv >= SDP_SEND_POST_FRACTION) {
conn->s_cur_adv++;
conn->s_par_adv = 0;
}
@@ -596,7 +593,7 @@ static int _sdp_send_data_iocb_src(struc
#endif
break;
case SDP_MODE_COMB:
- if (0 < conn->src_sent)
+ if (conn->src_sent > 0)
return ENOBUFS;
break;
default:
@@ -608,7 +605,7 @@ static int _sdp_send_data_iocb_src(struc
* get buffer
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Error allocating SrcAvail buffer. <%d>",
iocb->key);
return -ENOMEM;
@@ -619,7 +616,7 @@ static int _sdp_send_data_iocb_src(struc
* for the same IOCB.
*/
result = sdp_iocb_register(iocb, conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> registering IOCB. <%d:%d>",
result, iocb->key, iocb->len);
goto drop;
@@ -647,7 +644,7 @@ static int _sdp_send_data_iocb_src(struc
SDP_BUFF_F_CLR_SE(buff);
SDP_BUFF_F_CLR_UNSIG(buff);
- if (SDP_MODE_COMB == conn->send_mode) {
+ if (conn->send_mode == SDP_MODE_COMB) {
void *addr;
int pos;
int off;
@@ -673,7 +670,7 @@ static int _sdp_send_data_iocb_src(struc
* map, copy, unmap.
*/
addr = __sdp_kmap(iocb->page_array[pos]);
- if (NULL == addr) {
+ if (!addr) {
result = -ENOMEM;
goto error;
}
@@ -701,7 +698,7 @@ static int _sdp_send_data_iocb_src(struc
* queue/send SrcAvail message
*/
result = _sdp_send_buff_post(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting SrcAvail for IOCB <%d>",
result, iocb->key);
goto release;
@@ -709,10 +706,10 @@ static int _sdp_send_data_iocb_src(struc
return 0;
release:
- conn->nond_send -= (SDP_MODE_COMB == conn->send_mode) ? 1 : 0;
+ conn->nond_send -= (conn->send_mode == SDP_MODE_COMB) ? 1 : 0;
conn->src_sent--;
- iocb->len += ((SDP_MODE_COMB == conn->send_mode) ?
+ iocb->len += ((conn->send_mode == SDP_MODE_COMB) ?
SDP_SRC_AVAIL_MIN : 0);
error:
iocb->flags &= ~(SDP_IOCB_F_RDMA_R | SDP_IOCB_F_ACTIVE);
@@ -740,12 +737,12 @@ static int _sdp_send_iocb_buff_write(str
offset = (iocb->post + iocb->page_offset) & (~PAGE_MASK);
while (buff->tail < buff->end &&
- 0 < iocb->len) {
+ iocb->len > 0) {
/*
* map correct page of iocb
*/
addr = __sdp_kmap(iocb->page_array[counter]);
- if (NULL == addr)
+ if (!addr)
break;
copy = min((PAGE_SIZE - offset),
@@ -779,21 +776,21 @@ static int _sdp_send_data_iocb_buff(stru
int result;
int copy;
- if (0 < conn->src_sent)
+ if (conn->src_sent > 0)
return ENOBUFS;
/*
* loop through queued buffers and copy them to the destination
*/
copy = min(__sdp_inet_write_space(conn, 0), iocb->len);
- while (0 < copy &&
- 2 < conn->r_recv_bf &&
+ while (copy > 0 &&
+ conn->r_recv_bf > 2 &&
conn->send_cq_size > conn->s_wq_size) {
/*
* get a buffer for posting.
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
result = -ENOMEM;
goto error;
}
@@ -814,7 +811,7 @@ static int _sdp_send_data_iocb_buff(stru
* TODO: need to be checking OOB here.
*/
result =_sdp_send_iocb_buff_write(iocb, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> copy from IOCB <%d>.",
result, iocb->key);
goto drop;
@@ -829,7 +826,7 @@ static int _sdp_send_data_iocb_buff(stru
* since the IOCB queue took care of the increment.
*/
result = _sdp_send_buff_post(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> send queue buff post",
result);
goto drop;
@@ -850,30 +847,30 @@ static int _sdp_send_data_iocb(struct sd
{
int result = ENOBUFS;
- if (!(conn->send_cq_size > conn->s_wq_size))
+ if (conn->send_cq_size <= conn->s_wq_size)
goto done;
/*
* confirm IOCB usage.
*/
- if (SDP_DESC_TYPE_IOCB != iocb->type)
+ if (iocb->type != SDP_DESC_TYPE_IOCB)
return -ENOBUFS;
/*
* determin if we are sending Buffered, Source or Sink.
*/
- if (0 < sdp_advt_q_size(&conn->snk_pend)) {
+ if (sdp_advt_q_size(&conn->snk_pend) > 0) {
result = _sdp_send_data_iocb_snk(conn, iocb);
- if (0 == result) {
+ if (!result) {
/*
* IOCB completely processed. Otherwise we allow the
* callers to determine the fate of the IOCB on
* failure or partial processing.
*/
- if (0 < sdp_desc_q_member((struct sdpc_desc *)iocb))
+ if (sdp_desc_q_member((struct sdpc_desc *)iocb) > 0)
sdp_desc_q_remove((struct sdpc_desc *)iocb);
result = sdp_desc_q_put_tail(&conn->w_snk,
(struct sdpc_desc *)iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> queuing write IOCB.",
result);
@@ -887,25 +884,25 @@ static int _sdp_send_data_iocb(struct sd
* hope that a new sink advertisment will arrive, because
* sinks are more efficient.
*/
- if (0 < sdp_desc_q_size(&conn->w_snk))
+ if (sdp_desc_q_size(&conn->w_snk) > 0)
goto done;
if (conn->src_zthresh > iocb->len ||
- SDP_MODE_BUFF == conn->send_mode ||
- 0 < (SDP_IOCB_F_BUFF & iocb->flags)) {
+ conn->send_mode == SDP_MODE_BUFF ||
+ (iocb->flags & SDP_IOCB_F_BUFF)) {
result = _sdp_send_data_iocb_buff(conn, iocb);
- if (0 == result) {
+ if (!result) {
/*
* complete this IOCB
*/
- if (0 < sdp_desc_q_member((struct sdpc_desc *) iocb))
+ if (sdp_desc_q_member((struct sdpc_desc *) iocb) > 0)
sdp_desc_q_remove((struct sdpc_desc *) iocb);
SDP_CONN_STAT_WRITE_INC(conn, iocb->post);
SDP_CONN_STAT_WQ_DEC(conn, iocb->size);
result = sdp_iocb_complete(iocb, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn,
"Error <%d> completing iocb <%d>",
result, iocb->key);
@@ -915,15 +912,15 @@ static int _sdp_send_data_iocb(struct sd
}
result = _sdp_send_data_iocb_src(conn, iocb);
- if (0 == result) {
+ if (!result) {
/*
* queue IOCB
*/
- if (0 < sdp_desc_q_member((struct sdpc_desc *) iocb))
+ if (sdp_desc_q_member((struct sdpc_desc *) iocb) > 0)
sdp_desc_q_remove((struct sdpc_desc *)iocb);
result = sdp_iocb_q_put_tail(&conn->w_src, iocb);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> queueing write <%d:%d>",
result, iocb->key,
sdp_iocb_q_size(&conn->w_src));
@@ -946,15 +943,15 @@ static int _sdp_send_data_queue_test(str
* 1) Invalid state for transmission
* 2) source advertisment cancel in progress.
*/
- if (0 == (SDP_ST_MASK_SEND_OK & conn->state) ||
- 0 < (SDP_CONN_F_SRC_CANCEL_L & conn->flags))
+ if (!(SDP_ST_MASK_SEND_OK & conn->state) ||
+ (conn->flags & SDP_CONN_F_SRC_CANCEL_L))
return ENOBUFS;
- if (SDP_DESC_TYPE_IOCB == element->type)
+ if (element->type == SDP_DESC_TYPE_IOCB)
return _sdp_send_data_iocb(conn, (struct sdpc_iocb *)element);
- if (0 == sdp_advt_q_look(&conn->snk_pend) ||
- (SDP_BUFF_F_OOB_PRES & ((struct sdpc_buff *)element)->flags))
+ if (!sdp_advt_q_look(&conn->snk_pend) ||
+ (((struct sdpc_buff *)element)->flags & SDP_BUFF_F_OOB_PRES))
result = _sdp_send_data_buff_post(conn,
(struct sdpc_buff *)element);
else
@@ -977,9 +974,9 @@ static int _sdp_send_data_queue_flush(st
* non-zero result is generated.
* (positive: no space; negative: error)
*/
- while (!result && 0 < sdp_desc_q_size(&conn->send_queue)) {
+ while (!result && sdp_desc_q_size(&conn->send_queue) > 0) {
element = sdp_desc_q_look_head(&conn->send_queue);
- SDP_EXPECT((NULL != element));
+ SDP_EXPECT((element));
result = _sdp_send_data_queue_test(conn, element);
if (!result)
@@ -987,7 +984,7 @@ static int _sdp_send_data_queue_flush(st
/*
* error
*/
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> post data during flush",
result);
/*
@@ -995,10 +992,10 @@ static int _sdp_send_data_queue_flush(st
* since called functions can dequeue the
* element, and not know how to requeue it.
*/
- if (0 == sdp_desc_q_member(element)) {
+ if (!sdp_desc_q_member(element)) {
result = sdp_desc_q_put_head(&conn->send_queue,
element);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
}
@@ -1021,10 +1018,10 @@ static int _sdp_send_data_queue(struct s
* requires us to buffer, a negative result is an error, a return
* value of zero is a successful transmission
*/
- if (0 < sdp_desc_q_size(&conn->send_queue) ||
- 0 < (result = _sdp_send_data_queue_test(conn, element))) {
+ if (sdp_desc_q_size(&conn->send_queue) > 0 ||
+ (result = _sdp_send_data_queue_test(conn, element)) > 0) {
result = sdp_desc_q_put_tail(&conn->send_queue, element);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing data for send",
result);
goto done;
@@ -1032,12 +1029,11 @@ static int _sdp_send_data_queue(struct s
/*
* Potentially request a switch to pipelined mode.
*/
- if (SDP_MODE_COMB == conn->send_mode &&
- !(SDP_INET_SEND_MODE >
- sdp_desc_q_size(&conn->send_queue))) {
+ if (conn->send_mode == SDP_MODE_COMB &&
+ sdp_desc_q_size(&conn->send_queue) >= SDP_INET_SEND_MODE) {
result = sdp_send_ctrl_mode_ch(conn,
SDP_MSG_MCH_PIPE_RECV);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> posting mode change",
result);
@@ -1046,7 +1042,7 @@ static int _sdp_send_data_queue(struct s
}
}
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> during data send posting",
result);
goto done;
@@ -1069,11 +1065,11 @@ static inline struct sdpc_buff *_sdp_sen
*/
buff = (struct sdpc_buff *)sdp_desc_q_look_type_tail(&conn->send_queue,
SDP_DESC_TYPE_BUFF);
- if (NULL == buff ||
+ if (!buff ||
buff->tail == buff->end ||
- 0 < (SDP_BUFF_F_OOB_PRES & buff->flags)) {
+ (buff->flags & SDP_BUFF_F_OOB_PRES)) {
buff = sdp_buff_pool_get();
- if (NULL != buff) {
+ if (buff) {
buff->tail = buff->end - conn->send_size;
buff->data = buff->tail;
}
@@ -1096,12 +1092,12 @@ static inline int _sdp_send_data_buff_pu
/*
* See note on send OOB implementation in SendBuffPost.
*/
- if (0 < urg) {
+ if (urg > 0) {
buff->flags |= SDP_BUFF_F_OOB_PRES;
/*
* The OOB PEND and PRES flags need to match up as pairs.
*/
- if (0 > conn->oob_offset) {
+ if (conn->oob_offset < 0) {
conn->oob_offset = conn->send_pipe + size;
conn->flags |= SDP_CONN_F_OOB_SEND;
}
@@ -1110,7 +1106,7 @@ static inline int _sdp_send_data_buff_pu
* if the buffer is already queue, then this was a fill of a partial
* buffer and dosn't need to be queued now.
*/
- if (0 < (SDP_BUFF_F_QUEUED & buff->flags)) {
+ if (buff->flags & SDP_BUFF_F_QUEUED) {
buff->data_size += size;
conn->send_qud += size;
conn->send_pipe += size;
@@ -1124,12 +1120,12 @@ static inline int _sdp_send_data_buff_pu
* finally send the data buffer
*/
result = _sdp_send_data_queue(conn, (struct sdpc_desc *) buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> buffer to SEND queue.",
result);
expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
}
}
@@ -1148,16 +1144,16 @@ static int _sdp_send_ctrl_buff_test(stru
{
int result = 0;
- if (0 == (SDP_ST_MASK_CTRL_OK & conn->state) ||
+ if (!(SDP_ST_MASK_CTRL_OK & conn->state) ||
!(conn->send_cq_size > conn->s_wq_size) ||
- !(0 < conn->r_recv_bf) ||
- (conn->l_recv_bf == conn->l_advt_bf && 1 == conn->r_recv_bf))
+ conn->r_recv_bf <= 0 ||
+ (conn->l_recv_bf == conn->l_advt_bf && conn->r_recv_bf == 1))
return ENOBUFS;
/*
* post the control buffer
*/
result = _sdp_send_buff_post(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control send", result);
goto error;
}
@@ -1179,24 +1175,24 @@ static int _sdp_send_ctrl_buff_flush(str
* As long as there are buffers, try to post until a non-zero
* result is generated. (positive: no space; negative: error)
*/
- while (!result && 0 < sdp_desc_q_size(&conn->send_ctrl)) {
+ while (!result && sdp_desc_q_size(&conn->send_ctrl) > 0) {
element = sdp_desc_q_look_head(&conn->send_ctrl);
- SDP_EXPECT((NULL != element));
+ SDP_EXPECT((element));
result = _sdp_send_ctrl_buff_test(conn,
(struct sdpc_buff *)element);
if (!result)
continue;
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> failed to flush control msg",
result);
- if (0 == sdp_desc_q_member(element)) {
+ if (!sdp_desc_q_member(element)) {
result = sdp_desc_q_put_head(&conn->send_ctrl,
element);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
}
@@ -1215,21 +1211,21 @@ static int _sdp_send_ctrl_buff_buffered(
/*
* Either post a send, or buffer the packet in the tx queue
*/
- if (0 < sdp_desc_q_size(&conn->send_ctrl) ||
- 0 < (result = _sdp_send_ctrl_buff_test(conn, buff))) {
+ if (sdp_desc_q_size(&conn->send_ctrl) > 0 ||
+ (result = _sdp_send_ctrl_buff_test(conn, buff)) > 0) {
/*
* save the buffer for later flushing into the post queue.
*/
result = sdp_desc_q_put_tail(&conn->send_ctrl,
(struct sdpc_desc *)buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing control buff",
result);
goto error;
}
}
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> during control send posting",
result);
goto error;
@@ -1256,7 +1252,7 @@ static int _sdp_send_ctrl_buff(struct sd
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for control");
result = -ENOMEM;
goto error;
@@ -1287,7 +1283,7 @@ static int _sdp_send_ctrl_buff(struct sd
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
(void)sdp_buff_pool_put(buff);
@@ -1310,7 +1306,7 @@ static int _sdp_send_ctrl_disconnect(str
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for disconnect");
result = -ENOMEM;
goto error;
@@ -1335,7 +1331,7 @@ static int _sdp_send_ctrl_disconnect(str
* send
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
goto error;
@@ -1360,10 +1356,10 @@ int sdp_send_ctrl_disconnect(struct sdp_
* clean. The state is now in a disconnect send, the message will be
* sent once data is flushed.
*/
- if (0 < (SDP_ST_MASK_DIS_PEND & conn->state)) {
- if (0 == (SDP_CONN_F_DIS_HOLD & conn->flags) &&
- 0 == sdp_desc_q_size(&conn->send_queue) &&
- 0 == conn->src_sent)
+ if (SDP_ST_MASK_DIS_PEND & conn->state) {
+ if (!(conn->flags & SDP_CONN_F_DIS_HOLD) &&
+ !sdp_desc_q_size(&conn->send_queue) &&
+ !conn->src_sent)
result = _sdp_send_ctrl_disconnect(conn);
else {
sdp_dbg_ctrl(conn, "defer disconnect <%d:%d> <%08x>",
@@ -1391,9 +1387,9 @@ int sdp_send_ctrl_ack(struct sdp_opt *co
* conditions then the control queue, so there is more checking to
* be done, then whether there is data in the queue.
*/
- if (0 < sdp_desc_q_size(&conn->send_ctrl) ||
- (0 < sdp_desc_q_size(&conn->send_queue) &&
- 2 < conn->l_advt_bf))
+ if (sdp_desc_q_size(&conn->send_ctrl) > 0 ||
+ (sdp_desc_q_size(&conn->send_queue) > 0 &&
+ conn->l_advt_bf > 2))
return 0;
return _sdp_send_ctrl_buff(conn, SDP_MID_DATA, 0, 0);
@@ -1456,7 +1452,7 @@ int sdp_send_ctrl_resize_buff_ack(struct
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for resize ack");
result = -ENOMEM;
goto error;
@@ -1483,7 +1479,7 @@ int sdp_send_ctrl_resize_buff_ack(struct
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
(void)sdp_buff_pool_put(buff);
@@ -1505,7 +1501,7 @@ int sdp_send_ctrl_rdma_rd(struct sdp_opt
/*
* check size
*/
- if (0 > size) {
+ if (size < 0) {
sdp_dbg_warn(conn, "RDMA read completion <%d> too small.",
size);
return -ERANGE;
@@ -1515,7 +1511,7 @@ int sdp_send_ctrl_rdma_rd(struct sdp_opt
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for RDMA rd");
result = -ENOMEM;
goto error;
@@ -1552,7 +1548,7 @@ int sdp_send_ctrl_rdma_rd(struct sdp_opt
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
(void)sdp_buff_pool_put(buff);
@@ -1576,7 +1572,7 @@ int sdp_send_ctrl_rdma_wr(struct sdp_opt
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for RDMA wr");
result = -ENOMEM;
goto error;
@@ -1609,7 +1605,7 @@ int sdp_send_ctrl_rdma_wr(struct sdp_opt
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
(void)sdp_buff_pool_put(buff);
@@ -1634,7 +1630,7 @@ int sdp_send_ctrl_snk_avail(struct sdp_o
/*
* check mode
*/
- if (SDP_MODE_PIPE != conn->recv_mode) {
+ if (conn->recv_mode != SDP_MODE_PIPE) {
result = -EPROTO;
goto error;
}
@@ -1643,7 +1639,7 @@ int sdp_send_ctrl_snk_avail(struct sdp_o
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for SnkAvail");
result = -ENOMEM;
goto error;
@@ -1674,7 +1670,7 @@ int sdp_send_ctrl_snk_avail(struct sdp_o
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
goto error;
@@ -1699,19 +1695,19 @@ int sdp_send_ctrl_mode_ch(struct sdp_opt
*/
switch (mode) {
case SDP_MSG_MCH_BUFF_RECV: /* source to sink */
- conn->send_mode = ((SDP_MODE_COMB == conn->send_mode) ?
+ conn->send_mode = ((conn->send_mode == SDP_MODE_COMB) ?
SDP_MODE_BUFF : SDP_MODE_ERROR);
break;
case SDP_MSG_MCH_COMB_SEND: /* sink to source */
- conn->recv_mode = ((SDP_MODE_BUFF == conn->recv_mode) ?
+ conn->recv_mode = ((conn->recv_mode == SDP_MODE_BUFF) ?
SDP_MODE_COMB : SDP_MODE_ERROR);
break;
case SDP_MSG_MCH_PIPE_RECV: /* source to sink */
- conn->send_mode = ((SDP_MODE_COMB == conn->send_mode) ?
+ conn->send_mode = ((conn->send_mode == SDP_MODE_COMB) ?
SDP_MODE_PIPE : SDP_MODE_ERROR);
break;
case SDP_MSG_MCH_COMB_RECV: /* source to sink */
- conn->send_mode = ((SDP_MODE_PIPE == conn->send_mode) ?
+ conn->send_mode = ((conn->send_mode == SDP_MODE_PIPE) ?
SDP_MODE_COMB : SDP_MODE_ERROR);
break;
default:
@@ -1721,8 +1717,8 @@ int sdp_send_ctrl_mode_ch(struct sdp_opt
goto error;
}
- if (SDP_MODE_ERROR == conn->send_mode ||
- SDP_MODE_ERROR == conn->recv_mode) {
+ if (conn->send_mode == SDP_MODE_ERROR ||
+ conn->recv_mode == SDP_MODE_ERROR) {
sdp_dbg_warn(conn, "mode transition error <%d:%d:%d>",
mode, conn->send_mode, conn->recv_mode);
result = -EPROTO;
@@ -1733,7 +1729,7 @@ int sdp_send_ctrl_mode_ch(struct sdp_opt
* (don't need to worry about header space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buffer for ModeChange");
result = -ENOMEM;
goto error;
@@ -1760,7 +1756,7 @@ int sdp_send_ctrl_mode_ch(struct sdp_opt
* Either post a send, or buffer the packet in the tx queue
*/
result = _sdp_send_ctrl_buff_buffered(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting control message",
result);
(void)sdp_buff_pool_put(buff);
@@ -1782,7 +1778,7 @@ static int _sdp_write_src_lookup(struct
struct sdpc_iocb *iocb = (struct sdpc_iocb *) element;
struct kiocb *req = (struct kiocb *)arg;
- if (SDP_DESC_TYPE_IOCB == element->type && iocb->key == req->ki_key)
+ if (element->type == SDP_DESC_TYPE_IOCB && iocb->key == req->ki_key)
return 0;
else
return -ERANGE;
@@ -1801,7 +1797,7 @@ static int _sdp_inet_write_cancel(struct
sdp_dbg_ctrl(NULL, "Cancel Write IOCB user <%d> key <%d> flag <%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
- if (NULL == si || NULL == si->sock || NULL == si->sock->sk) {
+ if (!si || !si->sock || !si->sock->sk) {
sdp_warn("Cancel empty write IOCB users <%d> flags <%d:%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
result = -EFAULT;
@@ -1823,38 +1819,38 @@ static int _sdp_inet_write_cancel(struct
iocb = (struct sdpc_iocb *)sdp_desc_q_lookup(&conn->send_queue,
_sdp_write_src_lookup,
req);
- if (NULL != iocb) {
+ if (iocb) {
/*
* always remove the IOCB.
* If active, then place it into the correct active queue
*/
sdp_desc_q_remove((struct sdpc_desc *)iocb);
- if (0 < (SDP_IOCB_F_ACTIVE & iocb->flags)) {
- if (0 < (SDP_IOCB_F_RDMA_W & iocb->flags)) {
+ if (iocb->flags & SDP_IOCB_F_ACTIVE) {
+ if (iocb->flags & SDP_IOCB_F_RDMA_W) {
result = sdp_desc_q_put_tail(&conn->w_snk,
(struct sdpc_desc *)iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
} else {
- SDP_EXPECT((SDP_IOCB_F_RDMA_R & iocb->flags));
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_R));
result = sdp_iocb_q_put_tail(&conn->w_src,
iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
} else {
/*
* empty IOCBs can be deleted, while partials
* needs to be compelted.
*/
- if (0 < iocb->post) {
+ if (iocb->post > 0) {
result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = -EAGAIN;
} else {
result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* completion reference
*/
@@ -1874,7 +1870,7 @@ static int _sdp_inet_write_cancel(struct
_sdp_write_src_lookup,
req);
- if (NULL != iocb) {
+ if (iocb) {
iocb->flags |= SDP_IOCB_F_CANCEL;
result = -EAGAIN;
@@ -1885,7 +1881,7 @@ static int _sdp_inet_write_cancel(struct
* needs to be issued.
*/
iocb = sdp_iocb_q_lookup(&conn->w_src, req->ki_key);
- if (NULL != iocb) {
+ if (iocb) {
/*
* Unfortunetly there is only a course grain cancel in SDP,
* so we have to cancel everything. This is OKish since it
@@ -1897,14 +1893,14 @@ static int _sdp_inet_write_cancel(struct
* connection is marked as being in cancel processing so no
* other writes get into the outbound pipe.
*/
- if (0 == (SDP_CONN_F_SRC_CANCEL_L & conn->flags) &&
- 0 == (SDP_IOCB_F_CANCEL & iocb->flags)) {
+ if (!(conn->flags & SDP_CONN_F_SRC_CANCEL_L) &&
+ !(iocb->flags & SDP_IOCB_F_CANCEL)) {
conn->src_cncl++;
iocb->flags |= SDP_IOCB_F_CANCEL;
if (conn->src_cncl == sdp_iocb_q_size(&conn->w_src)) {
result = sdp_send_ctrl_src_cancel(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
conn->flags |= SDP_CONN_F_SRC_CANCEL_L;
}
@@ -1944,21 +1940,21 @@ static int _sdp_send_flush_advt(struct s
* for transmission, and the remote host needs to be notified of
* present data. (rdma ping-pong letency test...)
*/
- if (0 == sdp_desc_q_size(&conn->send_queue)) {
+ if (!sdp_desc_q_size(&conn->send_queue)) {
/*
* might be more aggressive then we want it to be. maybe
* check if the active sink queue is empty as well?
*/
advt = sdp_advt_q_look(&conn->snk_pend);
- if (NULL != advt && 0 < advt->post) {
+ if (advt && advt->post > 0) {
advt = sdp_advt_q_get(&conn->snk_pend);
- SDP_EXPECT((NULL != advt));
+ SDP_EXPECT((advt));
result = sdp_send_ctrl_rdma_wr(conn, advt->post);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* update sink advertisments.
*/
@@ -1982,7 +1978,7 @@ int sdp_send_flush(struct sdp_opt *conn)
* send credit utilization rules.
*/
result = _sdp_send_ctrl_buff_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing control", result);
goto done;
}
@@ -1990,16 +1986,16 @@ int sdp_send_flush(struct sdp_opt *conn)
* data flush
*/
result = _sdp_send_data_queue_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing data queue", result);
goto done;
}
/*
* Sink advertisment flush.
*/
- if (0 < sdp_advt_q_size(&conn->snk_pend)) {
+ if (sdp_advt_q_size(&conn->snk_pend) > 0) {
result = _sdp_send_flush_advt(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> flushing sink advertisments",
result);
@@ -2009,9 +2005,9 @@ int sdp_send_flush(struct sdp_opt *conn)
/*
* disconnect flush
*/
- if (0 < (SDP_ST_MASK_DIS_PEND & conn->state)) {
+ if (SDP_ST_MASK_DIS_PEND & conn->state) {
result = sdp_send_ctrl_disconnect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing disconnect",
result);
goto done;
@@ -2065,7 +2061,7 @@ int sdp_inet_send(struct kiocb *req,
* continue being processed, it'll wait below until the send window
* is opened on sucessful connect, or error on an unsucessful attempt.
*/
- if (0 < (SDP_ST_MASK_CLOSED & conn->istate)) {
+ if (SDP_ST_MASK_CLOSED & conn->istate) {
result = -EPIPE;
goto done;
}
@@ -2092,9 +2088,9 @@ int sdp_inet_send(struct kiocb *req,
* observed...) use a different threshold for urgent
* data to allow some space for sending.
*/
- while (0 < __sdp_inet_write_space(conn, oob)) {
+ while (__sdp_inet_write_space(conn, oob) > 0) {
buff = _sdp_send_data_buff_get(conn);
- if (NULL == buff) {
+ if (!buff) {
result = -ENOMEM;
goto done;
}
@@ -2107,7 +2103,7 @@ int sdp_inet_send(struct kiocb *req,
result = memcpy_fromiovec(buff->tail,
msg->msg_iov,
copy);
- if (0 > result) {
+ if (result < 0) {
(void)sdp_buff_pool_put(buff);
goto done;
}
@@ -2120,7 +2116,7 @@ int sdp_inet_send(struct kiocb *req,
result = _sdp_send_data_buff_put(conn, buff, copy,
((copied ==
size) ? oob : 0));
- if (0 > result)
+ if (result < 0)
goto done;
if (copied == size)
@@ -2141,34 +2137,34 @@ skip: /* entry point for IOCB based tran
/*
* onetime setup of timeout, but only if it's needed.
*/
- if (0 > timeout)
+ if (timeout < 0)
timeout = sock_sndtimeo(sk, (MSG_DONTWAIT &
msg->msg_flags));
- if (0 != SDP_CONN_GET_ERR(conn)) {
- result = (0 < copied) ? 0 : sdp_conn_error(conn);
+ if (SDP_CONN_GET_ERR(conn)) {
+ result = (copied > 0) ? 0 : sdp_conn_error(conn);
break;
}
- if (0 < (SEND_SHUTDOWN & conn->shutdown)) {
+ if (SEND_SHUTDOWN & conn->shutdown) {
result = -EPIPE;
break;
}
- if (SDP_SOCK_ST_ERROR == conn->istate) {
+ if (conn->istate == SDP_SOCK_ST_ERROR) {
result = -EPROTO; /* error should always be set, but
just in case */
break;
}
- if (0 == timeout) {
+ if (!timeout) {
result = -EAGAIN;
break;
}
if (signal_pending(current)) {
result =
- (0 < timeout) ? sock_intr_errno(timeout) : -EAGAIN;
+ (timeout > 0) ? sock_intr_errno(timeout) : -EAGAIN;
break;
}
/*
@@ -2190,7 +2186,7 @@ skip: /* entry point for IOCB based tran
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
sdp_conn_unlock(conn);
- if (!(0 < __sdp_inet_write_space(conn, oob)))
+ if (__sdp_inet_write_space(conn, oob) <= 0)
timeout = schedule_timeout(timeout);
sdp_conn_lock(conn);
@@ -2203,7 +2199,7 @@ skip: /* entry point for IOCB based tran
* create IOCB with remaining space
*/
iocb = sdp_iocb_create();
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "Failed to allocate IOCB <%Zu:%d>",
size, copied);
result = -ENOMEM;
@@ -2220,7 +2216,7 @@ skip: /* entry point for IOCB based tran
req->ki_cancel = _sdp_inet_write_cancel;
result = sdp_iocb_lock(iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> locking IOCB <%Zu:%d>",
result, size, copied);
@@ -2233,7 +2229,7 @@ skip: /* entry point for IOCB based tran
conn->send_pipe += iocb->len;
result = _sdp_send_data_queue(conn, (struct sdpc_desc *)iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing write IOCB",
result);
@@ -2247,9 +2243,9 @@ skip: /* entry point for IOCB based tran
done:
sdp_conn_unlock(conn);
- result = ((0 < copied) ? copied : result);
+ result = ((copied > 0) ? copied : result);
- if (-EPIPE == result && 0 == (MSG_NOSIGNAL & msg->msg_flags))
+ if (result == -EPIPE && !(MSG_NOSIGNAL & msg->msg_flags))
send_sig(SIGPIPE, current, 0);
return result;
Index: sdp_actv.c
===================================================================
--- sdp_actv.c (revision 1922)
+++ sdp_actv.c (working copy)
@@ -70,7 +70,7 @@ void sdp_cm_actv_error(struct sdp_opt *c
IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> sending CM REJ.",
result);
@@ -82,7 +82,7 @@ void sdp_cm_actv_error(struct sdp_opt *c
* full disconnect.
*/
result = ib_send_cm_dreq(conn->cm_id, NULL, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(NULL, "Error <%d> sending CM DREQ",
result);
@@ -151,7 +151,7 @@ static int _sdp_actv_conn_establish(stru
* post receive buffers.
*/
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting recv buffers.", result);
goto done;
}
@@ -175,7 +175,7 @@ static int _sdp_actv_conn_establish(stru
* respond to the remote connection manager with a RTU
*/
result = ib_send_cm_rtu(conn->cm_id, NULL, 0);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> sending CM RTU.", result);
goto done;
}
@@ -196,7 +196,7 @@ static int _sdp_actv_conn_establish(stru
inet_sk(sk)->rcv_saddr = htonl(conn->src_addr);
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing receives.", result);
goto done;
}
@@ -238,7 +238,7 @@ static int _sdp_cm_hello_ack_check(struc
return -EINVAL;
}
- if (!(0 < hello_ack->hah.max_adv)) {
+ if (hello_ack->hah.max_adv <= 0) {
sdp_dbg_warn(NULL, "hello ack, bad zcopy advertisment. <%d>",
hello_ack->hah.max_adv);
return -EINVAL;
@@ -276,7 +276,7 @@ int sdp_cm_rep_handler(struct ib_cm_id *
struct sdp_msg_hello_ack *hello_ack;
int result = -EPROTO;
- if (NULL == conn)
+ if (!conn)
return -EINVAL;
if (cm_id != conn->cm_id) {
@@ -297,7 +297,7 @@ int sdp_cm_rep_handler(struct ib_cm_id *
* the connection.
*/
result = _sdp_cm_hello_ack_check(hello_ack);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> hello ack check.", result);
goto error;
}
@@ -326,7 +326,7 @@ int sdp_cm_rep_handler(struct ib_cm_id *
(void)sdp_buff_pool_put(sdp_buff_q_get_head(&conn->send_post));
result = _sdp_actv_conn_establish(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> accept receive failed", result);
goto error;
}
@@ -392,7 +392,7 @@ static void _sdp_cm_path_complete(u64 id
/*
* create address handle
*/
- if (0 != status) {
+ if (status) {
sdp_dbg_warn(conn, "Path record completion error <%d>",
status);
goto failed;
@@ -410,7 +410,7 @@ static void _sdp_cm_path_complete(u64 id
* allocate IB resources.
*/
result = sdp_conn_alloc_ib(conn, ca, hw_port, path->pkey);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> allocating IB connection",
result);
goto failed;
@@ -420,7 +420,7 @@ static void _sdp_cm_path_complete(u64 id
* space reservation)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buff for Hello Msg.");
goto failed;
}
@@ -462,11 +462,11 @@ static void _sdp_cm_path_complete(u64 id
* save message
*/
result = sdp_buff_q_put(&conn->send_post, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> buffering hello msg.", result);
expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
status = -EPROTO;
goto failed;
@@ -475,7 +475,7 @@ static void _sdp_cm_path_complete(u64 id
/*
* Mellanox performance bug workaround.
*/
- if (IB_MTU_1024 < path->mtu)
+ if (path->mtu > IB_MTU_1024)
path->mtu = IB_MTU_1024;
#endif
conn->path_mtu = path->mtu;
@@ -509,7 +509,7 @@ static void _sdp_cm_path_complete(u64 id
#endif
conn->cm_id = ib_create_cm_id(sdp_cm_event_handler,
__hashent_arg(conn->hashent));
- if (NULL == conn->cm_id) {
+ if (!conn->cm_id) {
sdp_dbg_warn(conn, "Failed to create CM handle, %d",
(u8)(buff->tail - buff->data));
goto failed;
@@ -519,7 +519,7 @@ static void _sdp_cm_path_complete(u64 id
* initiate connection
*/
	result = ib_send_cm_req(conn->cm_id, &param);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> CM connect request", result);
status = result;
goto failed;
@@ -566,7 +566,7 @@ int sdp_cm_connect(struct sdp_opt *conn)
&conn->plid);
sdp_conn_lock(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> getting link <%08x:%08x> addr",
result,
htonl(conn->dst_addr),
Index: sdp_conn.c
===================================================================
--- sdp_conn.c (revision 1922)
+++ sdp_conn.c (working copy)
@@ -81,10 +81,10 @@ int sdp_inet_accept_q_put(struct sdp_opt
{
struct sdp_opt *next_conn;
- if (NULL != listen_conn->parent ||
- NULL != accept_conn->parent ||
- NULL == listen_conn->accept_next ||
- NULL == listen_conn->accept_prev)
+ if (listen_conn->parent ||
+ accept_conn->parent ||
+ !listen_conn->accept_next ||
+ !listen_conn->accept_prev)
return -EFAULT;
next_conn = listen_conn->accept_next;
@@ -112,9 +112,9 @@ struct sdp_opt *sdp_inet_accept_q_get(st
struct sdp_opt *prev_conn;
struct sdp_opt *accept_conn;
- if (NULL != listen_conn->parent ||
- NULL == listen_conn->accept_next ||
- NULL == listen_conn->accept_prev ||
+ if (listen_conn->parent ||
+ !listen_conn->accept_next ||
+ !listen_conn->accept_prev ||
listen_conn == listen_conn->accept_next ||
listen_conn == listen_conn->accept_prev)
return NULL;
@@ -151,7 +151,7 @@ int sdp_inet_accept_q_remove(struct sdp_
struct sdp_opt *next_conn;
struct sdp_opt *prev_conn;
- if (NULL == accept_conn->parent)
+ if (!accept_conn->parent)
return -EFAULT;
/*
* Removes the connection from the listening sockets accept queue.
@@ -185,7 +185,7 @@ int sdp_inet_listen_start(struct sdp_opt
{
unsigned long flags;
- if (SDP_SOCK_ST_CLOSED != conn->istate) {
+ if (conn->istate != SDP_SOCK_ST_CLOSED) {
sdp_dbg_warn(conn, "Incorrect connection state to listen.");
return -EBADFD;
}
@@ -204,7 +204,7 @@ int sdp_inet_listen_start(struct sdp_opt
_dev_root_s.listen_list = conn;
conn->lstn_p_next = &_dev_root_s.listen_list;
- if (NULL != conn->lstn_next)
+ if (conn->lstn_next)
conn->lstn_next->lstn_p_next = &conn->lstn_next;
spin_unlock_irqrestore(&_dev_root_s.listen_lock, flags);
@@ -220,7 +220,7 @@ int sdp_inet_listen_stop(struct sdp_opt
int result;
unsigned long flags;
- if (SDP_SOCK_ST_LISTEN != listen_conn->istate) {
+ if (listen_conn->istate != SDP_SOCK_ST_LISTEN) {
sdp_dbg_warn(listen_conn, "Incorrect state to stop listen.");
return -EBADFD;
}
@@ -233,7 +233,7 @@ int sdp_inet_listen_stop(struct sdp_opt
/*
* remove from listening list.
*/
- if (NULL != listen_conn->lstn_next)
+ if (listen_conn->lstn_next)
listen_conn->lstn_next->lstn_p_next = listen_conn->lstn_p_next;
*(listen_conn->lstn_p_next) = listen_conn->lstn_next;
@@ -245,7 +245,7 @@ int sdp_inet_listen_stop(struct sdp_opt
/*
* reject and delete all pending connections
*/
- while (NULL != (accept_conn = sdp_inet_accept_q_get(listen_conn))) {
+ while ((accept_conn = sdp_inet_accept_q_get(listen_conn))) {
/*
* The connection is going to be dropped now, mark the
* state as such in case of conntension for this conn.
@@ -255,7 +255,7 @@ int sdp_inet_listen_stop(struct sdp_opt
accept_conn->istate = SDP_SOCK_ST_CLOSED;
result = sdp_wall_send_abort(accept_conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(accept_conn, "Error <%d> during abort",
result);
@@ -285,7 +285,7 @@ struct sdp_opt *sdp_inet_listen_lookup(u
/*
* first find a listening connection
*/
- for (conn = _dev_root_s.listen_list; NULL != conn;
+ for (conn = _dev_root_s.listen_list; conn;
conn = conn->lstn_next)
if (port == conn->src_port &&
(INADDR_ANY == conn->src_addr || addr == conn->src_addr)) {
@@ -321,9 +321,9 @@ int sdp_inet_port_get(struct sdp_opt *co
/*
* simple linked list of sockets ordered on local port number.
*/
- if (0 < port) {
+ if (port > 0) {
for (look = _dev_root_s.bind_list, port_ok = 1;
- NULL != look; look = look->bind_next) {
+ look; look = look->bind_next) {
srch = look->sk;
/*
* 1) same port
@@ -340,9 +340,9 @@ int sdp_inet_port_get(struct sdp_opt *co
* 3) either socket has reuse turned off
* 4) socket already listening on this port
*/
- if (0 == sk->sk_reuse ||
- 0 == srch->sk_reuse ||
- SDP_SOCK_ST_LISTEN == look->istate) {
+ if (!sk->sk_reuse ||
+ !srch->sk_reuse ||
+ look->istate == SDP_SOCK_ST_LISTEN) {
/*
* 5) neither socket is using a
* specific address
@@ -370,14 +370,14 @@ int sdp_inet_port_get(struct sdp_opt *co
}
}
- if (0 == port_ok) {
+ if (!port_ok) {
result = -EADDRINUSE;
goto done;
}
} else {
low_port = SDP_INET_PORT_LOW;
top_port = SDP_INET_PORT_HIGH;
- rover = (0 > rover) ? low_port : rover;
+ rover = (rover < 0) ? low_port : rover;
for (counter = (top_port - low_port) + 1; counter > 0;
counter--) {
@@ -386,17 +386,17 @@ int sdp_inet_port_get(struct sdp_opt *co
rover = low_port;
for (look = _dev_root_s.bind_list;
- NULL != look && look->src_port != port;
+ look && look->src_port != port;
look = look->bind_next)
do {} while(0); /* pass */
- if (NULL == look) {
+ if (!look) {
port = rover;
break;
}
}
- if (0 == port) {
+ if (!port) {
result = -EADDRINUSE;
goto done;
}
@@ -410,7 +410,7 @@ int sdp_inet_port_get(struct sdp_opt *co
_dev_root_s.bind_list = conn;
conn->bind_p_next = &_dev_root_s.bind_list;
- if (NULL != conn->bind_next)
+ if (conn->bind_next)
conn->bind_next->bind_p_next = &conn->bind_next;
result = 0;
@@ -426,7 +426,7 @@ int sdp_inet_port_put(struct sdp_opt *co
{
unsigned long flags;
- if (NULL == conn->bind_p_next)
+ if (!conn->bind_p_next)
return -EADDRNOTAVAIL;
/*
* lock table
@@ -435,7 +435,7 @@ int sdp_inet_port_put(struct sdp_opt *co
/*
* remove from bind list.
*/
- if (NULL != conn->bind_next)
+ if (conn->bind_next)
conn->bind_next->bind_p_next = conn->bind_p_next;
*(conn->bind_p_next) = conn->bind_next;
@@ -461,7 +461,7 @@ int sdp_inet_port_inherit(struct sdp_opt
*/
spin_lock_irqsave(&_dev_root_s.bind_lock, flags);
- if (NULL != child->bind_p_next ||
+ if (child->bind_p_next ||
child->src_port != parent->src_port) {
sdp_dbg_warn(child, "child already bound. <%d:%d>",
parent->src_port, child->src_port);
@@ -475,7 +475,7 @@ int sdp_inet_port_inherit(struct sdp_opt
parent->bind_next = child;
child->bind_p_next = &parent->bind_next;
- if (NULL != child->bind_next)
+ if (child->bind_next)
child->bind_next->bind_p_next = &child->bind_next;
result = 0;
@@ -508,7 +508,7 @@ static int _sdp_conn_table_insert(struct
if (!(_dev_root_s.sk_rover < _dev_root_s.sk_size))
_dev_root_s.sk_rover = 0;
- if (NULL == _dev_root_s.sk_array[_dev_root_s.sk_rover]) {
+ if (!_dev_root_s.sk_array[_dev_root_s.sk_rover]) {
_dev_root_s.sk_array[_dev_root_s.sk_rover] = conn;
_dev_root_s.sk_entry++;
conn->hashent = _dev_root_s.sk_rover;
@@ -546,7 +546,7 @@ int sdp_conn_table_remove(struct sdp_opt
if (SDP_DEV_SK_INVALID == conn->hashent)
goto done;
- if (0 > conn->hashent || conn != _dev_root_s.sk_array[conn->hashent]) {
+ if (conn->hashent < 0 || conn != _dev_root_s.sk_array[conn->hashent]) {
result = -ERANGE;
goto done;
}
@@ -580,7 +580,7 @@ struct sdp_opt *sdp_conn_table_lookup(s3
/*
* validate range
*/
- if (0 > entry || !(_dev_root_s.sk_size > entry)) {
+ if (entry < 0 || entry >= _dev_root_s.sk_size) {
conn = NULL;
goto done;
@@ -588,7 +588,7 @@ struct sdp_opt *sdp_conn_table_lookup(s3
#endif
conn = _dev_root_s.sk_array[entry];
- if (NULL == conn)
+ if (!conn)
goto done;
sdp_conn_hold(conn);
@@ -602,14 +602,14 @@ done:
*/
static int _sdp_desc_q_cancel_lookup_func(struct sdpc_desc *element, void *arg)
{
- return ((SDP_DESC_TYPE_IOCB == element->type) ? 0 : -ERANGE);
+ return ((element->type == SDP_DESC_TYPE_IOCB) ? 0 : -ERANGE);
}
static void _sdp_desc_q_cancel_iocb(struct sdpc_desc_q *table, ssize_t error)
{
struct sdpc_iocb *iocb;
- while (NULL != (iocb = (struct sdpc_iocb *)sdp_desc_q_lookup
+ while ((iocb = (struct sdpc_iocb *)sdp_desc_q_lookup
(table,
_sdp_desc_q_cancel_lookup_func,
NULL))) {
@@ -653,7 +653,7 @@ int sdp_conn_destruct(struct sdp_opt *co
int result = 0;
int dump = 0;
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(NULL, "sk destruct, no connection!");
result = -EINVAL;
goto done;
@@ -671,13 +671,13 @@ int sdp_conn_destruct(struct sdp_opt *co
* remove connection from table
*/
result = sdp_conn_table_remove(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> removing connection <%u:%u>",
result, _dev_root_s.sk_entry,
_dev_root_s.sk_size);
result = __sdp_conn_stat_dump(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* really there shouldn't be anything in these tables, but it's
* really bad if we leave a dangling reference here.
@@ -719,7 +719,7 @@ int sdp_conn_destruct(struct sdp_opt *co
*/
if (conn->qp) {
result = ib_destroy_qp(conn->qp);
- if (0 > result && -EINVAL != result) {
+ if (result < 0 && result != -EINVAL) {
sdp_dbg_warn(conn, "Error <%d> detroying QP", result);
dump++;
}
@@ -729,7 +729,7 @@ int sdp_conn_destruct(struct sdp_opt *co
*/
if (conn->recv_cq) {
result = ib_destroy_cq(conn->recv_cq);
- if (0 > result && -EINVAL != result) {
+ if (result < 0 && result != -EINVAL) {
sdp_dbg_warn(conn, "Error <%d> detroying recv CQ",
result);
dump++;
@@ -738,7 +738,7 @@ int sdp_conn_destruct(struct sdp_opt *co
if (conn->send_cq) {
result = ib_destroy_cq(conn->send_cq);
- if (0 > result && -EINVAL != result) {
+ if (result < 0 && result != -EINVAL) {
sdp_dbg_warn(conn, "Error <%d> detroying send CQ",
result);
dump++;
@@ -758,13 +758,13 @@ int sdp_conn_destruct(struct sdp_opt *co
/*
* check consistancy
*/
- if (0 > atomic_read(&conn->refcnt))
+ if (atomic_read(&conn->refcnt) < 0)
sdp_dbg_warn(conn, "destruct low ref count <%04x>",
atomic_read(&conn->refcnt));
/*
* free the OS socket structure
*/
- if (NULL == conn->sk)
+ if (!conn->sk)
sdp_dbg_warn(conn, "destruct, no socket! continuing.");
else {
sk_free(conn->sk);
@@ -776,9 +776,9 @@ int sdp_conn_destruct(struct sdp_opt *co
result = 0;
done:
- if (0 != dump) {
+ if (dump) {
result = __sdp_conn_state_dump(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
return result;
@@ -800,7 +800,7 @@ void sdp_conn_internal_lock(struct sdp_o
spin_lock_irqsave(&(conn->lock.slock), f);
*flags = f;
- if (0 == conn->lock.users)
+ if (!conn->lock.users)
break;
}
@@ -826,7 +826,7 @@ void sdp_conn_relock(struct sdp_opt *con
result_r = ib_poll_cq(conn->recv_cq, 1, &entry);
if (1 == result_r) {
result = sdp_cq_event_locked(&entry, conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn,
"Error <%d> from event handler.",
result);
@@ -837,17 +837,17 @@ void sdp_conn_relock(struct sdp_opt *con
result_s = ib_poll_cq(conn->send_cq, 1, &entry);
if (1 == result_s) {
result = sdp_cq_event_locked(&entry, conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn,
"Error <%d> from event handler.",
result);
rearm = 1;
}
- if (0 != result_r || 0 != result_s)
+ if (result_r || result_s)
continue;
- if (0 < rearm) {
+ if (rearm > 0) {
result = ib_req_notify_cq(conn->recv_cq,
IB_CQ_NEXT_COMP);
if (result)
@@ -896,7 +896,7 @@ int sdp_conn_cq_drain(struct ib_cq *cq,
* to be armed.
*/
result = sdp_cq_event_locked(&entry, conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> event handler.",
result);
@@ -907,7 +907,7 @@ int sdp_conn_cq_drain(struct ib_cq *cq,
}
if (!result) {
- if (0 < rearm) {
+ if (rearm > 0) {
result = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
if (result)
sdp_dbg_warn(conn,
@@ -933,11 +933,11 @@ void sdp_conn_internal_unlock(struct sdp
/*
* poll CQs for events.
*/
- if (NULL != conn) {
- if (0 < (SDP_CONN_F_RECV_CQ_PEND & conn->flags))
+ if (conn) {
+ if (conn->flags & SDP_CONN_F_RECV_CQ_PEND)
calls += sdp_conn_cq_drain(conn->recv_cq, conn);
- if (0 < (SDP_CONN_F_SEND_CQ_PEND & conn->flags))
+ if (conn->flags & SDP_CONN_F_SEND_CQ_PEND)
calls += sdp_conn_cq_drain(conn->send_cq, conn);
conn->flags &= ~SDP_CONN_F_MASK_EVENT;
@@ -976,7 +976,7 @@ int sdp_conn_alloc_ib(struct sdp_opt *co
if (!hca)
return -ERANGE;
- for (port = hca->port_list; NULL != port; port = port->next)
+ for (port = hca->port_list; port; port = port->next)
if (hw_port == port->index)
break;
@@ -986,13 +986,13 @@ int sdp_conn_alloc_ib(struct sdp_opt *co
* allocate creation parameters
*/
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
- if (NULL == qp_attr) {
+ if (!qp_attr) {
result = -ENOMEM;
goto error_attr;
}
init_attr = kmalloc(sizeof(*init_attr), GFP_KERNEL);
- if (NULL == init_attr) {
+ if (!init_attr) {
result = -ENOMEM;
goto error_param;
}
@@ -1028,7 +1028,7 @@ int sdp_conn_alloc_ib(struct sdp_opt *co
conn->send_cq_size = conn->send_cq->cqe;
result = ib_req_notify_cq(conn->send_cq, IB_CQ_NEXT_COMP);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> arming send CQ.",
result);
goto error_rcq;
@@ -1052,7 +1052,7 @@ int sdp_conn_alloc_ib(struct sdp_opt *co
conn->recv_cq_size = conn->recv_cq->cqe;
result = ib_req_notify_cq(conn->recv_cq, IB_CQ_NEXT_COMP);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> arming recv CQ.",
result);
goto error_qp;
@@ -1106,7 +1106,7 @@ int sdp_conn_alloc_ib(struct sdp_opt *co
result = ib_modify_qp(conn->qp, qp_attr, attr_mask);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> modifying QP", result);
goto error_mod;
}
@@ -1143,7 +1143,7 @@ struct sdp_opt *sdp_conn_alloc(int prior
int result;
sk = sk_alloc(_dev_root_s.proto, priority, 1, _dev_root_s.sock_cache);
- if (NULL == sk) {
+ if (!sk) {
sdp_dbg_warn(NULL, "socket alloc error for protocol. <%d:%d>",
_dev_root_s.proto, priority);
return NULL;
@@ -1171,7 +1171,7 @@ struct sdp_opt *sdp_conn_alloc(int prior
* create/modifies must be in that context.
*/
conn = kmem_cache_alloc(_dev_root_s.conn_cache, priority);
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(conn, "connection alloc error. <%d>", priority);
result = -ENOMEM;
goto error;
@@ -1301,7 +1301,7 @@ struct sdp_opt *sdp_conn_alloc(int prior
* insert connection into lookup table
*/
result = _sdp_conn_table_insert(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> conn table insert <%d:%d>",
result, _dev_root_s.sk_entry,
@@ -1366,7 +1366,7 @@ int sdp_proc_dump_conn_main(char *buffer
/*
* header should only be printed once
*/
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset), SDP_PROC_CONN_MAIN_HEAD);
offset += sprintf((buffer + offset), SDP_PROC_CONN_MAIN_SEP);
}
@@ -1386,7 +1386,7 @@ int sdp_proc_dump_conn_main(char *buffer
counter < _dev_root_s.sk_size &&
!(SDP_CONN_PROC_MAIN_SIZE > (max_size - offset));
counter++) {
- if (NULL == _dev_root_s.sk_array[counter])
+ if (!_dev_root_s.sk_array[counter])
continue;
conn = _dev_root_s.sk_array[counter];
@@ -1471,7 +1471,7 @@ int sdp_proc_dump_conn_data(char *buffer
/*
* header should only be printed once
*/
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset), SDP_PROC_CONN_DATA_HEAD);
offset += sprintf((buffer + offset), SDP_PROC_CONN_DATA_SEP);
}
@@ -1490,7 +1490,7 @@ int sdp_proc_dump_conn_data(char *buffer
for (counter = start_index; counter < _dev_root_s.sk_size &&
!(SDP_CONN_PROC_DATA_SIZE > (max_size - offset));
counter++) {
- if (NULL == _dev_root_s.sk_array[counter])
+ if (!_dev_root_s.sk_array[counter])
continue;
conn = _dev_root_s.sk_array[counter];
@@ -1567,7 +1567,7 @@ int sdp_proc_dump_conn_rdma(char *buffer
/*
* header should only be printed once
*/
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset), SDP_PROC_CONN_RDMA_HEAD);
offset += sprintf((buffer + offset), SDP_PROC_CONN_RDMA_SEP);
}
@@ -1586,7 +1586,7 @@ int sdp_proc_dump_conn_rdma(char *buffer
for (counter = start_index; counter < _dev_root_s.sk_size &&
!(SDP_CONN_PROC_RDMA_SIZE > (max_size - offset));
counter++) {
- if (NULL == _dev_root_s.sk_array[counter])
+ if (!_dev_root_s.sk_array[counter])
continue;
conn = _dev_root_s.sk_array[counter];
@@ -1646,7 +1646,7 @@ int sdp_proc_dump_conn_sopt(char *buffer
/*
* header should only be printed once
*/
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset), SDP_PROC_CONN_SOPT_HEAD);
offset += sprintf((buffer + offset), SDP_PROC_CONN_SOPT_SEP);
}
@@ -1665,7 +1665,7 @@ int sdp_proc_dump_conn_sopt(char *buffer
for (counter = start_index; counter < _dev_root_s.sk_size &&
!(SDP_SOPT_PROC_DUMP_SIZE > (max_size - offset));
counter++) {
- if (NULL == _dev_root_s.sk_array[counter])
+ if (!_dev_root_s.sk_array[counter])
continue;
conn = _dev_root_s.sk_array[counter];
@@ -1706,7 +1706,7 @@ int sdp_proc_dump_device(char *buffer,
/*
* header should only be printed once
*/
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset),
"connection table maximum: <%d>\n",
_dev_root_s.sk_size);
@@ -1755,7 +1755,7 @@ static void sdp_device_init_one(struct i
* allocate per-HCA structure
*/
hca = kmalloc(sizeof(struct sdev_hca), GFP_KERNEL);
- if (NULL == hca) {
+ if (!hca) {
sdp_warn("Error allocating HCA <%s> memory.", device->name);
return;
}
@@ -1819,7 +1819,7 @@ static void sdp_device_init_one(struct i
port_count < device->phys_port_cnt;
port_count++) {
port = kmalloc(sizeof(struct sdev_hca_port), GFP_KERNEL);
- if (NULL == port) {
+ if (!port) {
sdp_warn("Error allocating HCA <%s> port <%d:%d>",
device->name, port_count,
device->phys_port_cnt);
@@ -1837,7 +1837,7 @@ static void sdp_device_init_one(struct i
port->index,
0, /* index */
&port->gid);
- if (0 != result) {
+ if (result) {
sdp_warn("Error <%d> getting GID for HCA <%s:%d:%d>",
result, device->name,
port->index, device->phys_port_cnt);
@@ -1850,7 +1850,7 @@ static void sdp_device_init_one(struct i
return;
error:
- while (NULL != hca->port_list) {
+ while (hca->port_list) {
port = hca->port_list;
hca->port_list = port->next;
port->next = NULL;
@@ -1881,12 +1881,12 @@ static void sdp_device_remove_one(struct
hca = ib_get_client_data(device, &sdp_client);
- if (NULL == hca) {
+ if (!hca) {
sdp_warn("Device <%s> has no HCA info.", device->name);
return;
}
- while (NULL != hca->port_list) {
+ while (hca->port_list) {
port = hca->port_list;
hca->port_list = port->next;
port->next = NULL;
@@ -1953,14 +1953,14 @@ int sdp_conn_table_init(int proto_family
* Get HCA/port list
*/
result = ib_register_client(&sdp_client);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> registering SDP client.", result);
goto error_hca;
}
/*
* create socket table
*/
- if (!(0 < conn_size)) {
+ if (conn_size <= 0) {
sdp_warn("Invalid connection table size. <%d>", conn_size);
result = -EINVAL;
goto error_size;
@@ -1973,7 +1973,7 @@ int sdp_conn_table_init(int proto_family
_dev_root_s.sk_array = (void *) __get_free_pages(GFP_KERNEL,
_dev_root_s.sk_ordr);
- if (NULL == _dev_root_s.sk_array) {
+ if (!_dev_root_s.sk_array) {
sdp_warn("Failed to create connection table. <%d:%d:%d>",
byte_size, page_size, _dev_root_s.sk_ordr);
result = -ENOMEM;
@@ -1989,7 +1989,7 @@ int sdp_conn_table_init(int proto_family
* IOCB table
*/
result = sdp_main_iocb_init();
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> initializing SDP IOCB table.", result);
goto error_iocb;
}
@@ -1998,7 +1998,7 @@ int sdp_conn_table_init(int proto_family
sizeof(struct sdp_opt),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == _dev_root_s.conn_cache) {
+ if (!_dev_root_s.conn_cache) {
sdp_warn("Failed to initialize connection cache.");
result = -ENOMEM;
goto error_conn;
@@ -2008,7 +2008,7 @@ int sdp_conn_table_init(int proto_family
sizeof(struct inet_sock),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == _dev_root_s.sock_cache) {
+ if (!_dev_root_s.sock_cache) {
sdp_warn("Failed to initialize sock cache.");
result = -ENOMEM;
goto error_sock;
@@ -2018,7 +2018,7 @@ int sdp_conn_table_init(int proto_family
* start listening
*/
result = sdp_cm_listen_start(&_dev_root_s);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> listening for connections on HCA.",
result);
goto error_listen;
@@ -2052,7 +2052,7 @@ int sdp_conn_table_clear(void)
/*
* drain all the connections
*/
- while (NULL != (conn = _dev_root_s.conn_list)) {
+ while ((conn = _dev_root_s.conn_list)) {
}
#endif
Index: sdp_advt.c
===================================================================
--- sdp_advt.c (revision 1922)
+++ sdp_advt.c (working copy)
@@ -52,7 +52,7 @@ struct sdpc_advt *sdp_advt_create(void)
struct sdpc_advt *advt;
advt = kmem_cache_alloc(__sdp_advt_cache, SLAB_KERNEL);
- if (NULL != advt) {
+ if (advt) {
advt->next = NULL;
advt->prev = NULL;
advt->size = 0;
@@ -72,7 +72,7 @@ struct sdpc_advt *sdp_advt_create(void)
*/
int sdp_advt_destroy(struct sdpc_advt *advt)
{
- if (NULL != advt->next || NULL != advt->prev)
+ if (advt->next || advt->prev)
return -EACCES;
/*
* return the object to its cache
@@ -92,7 +92,7 @@ struct sdpc_advt *sdp_advt_q_get(struct
struct sdpc_advt *prev;
advt = table->head;
- if (NULL == advt)
+ if (!advt)
return NULL;
if (advt->next == advt && advt->prev == advt)
@@ -130,7 +130,7 @@ void sdp_advt_q_put(struct sdpc_advt_q *
struct sdpc_advt *next;
struct sdpc_advt *prev;
- if (NULL == table->head) {
+ if (!table->head) {
advt->next = advt;
advt->prev = advt;
table->head = advt;
@@ -168,9 +168,9 @@ void sdp_advt_q_clear(struct sdpc_advt_q
/*
* drain the table of any objects
*/
- while (NULL != (advt = sdp_advt_q_get(table))) {
+ while ((advt = sdp_advt_q_get(table))) {
result = sdp_advt_destroy(advt);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
@@ -187,7 +187,7 @@ int sdp_main_advt_init(void)
/*
* initialize the caches only once.
*/
- if (NULL != __sdp_advt_cache) {
+ if (__sdp_advt_cache) {
sdp_warn("Advertisment caches already initialized.");
return -EINVAL;
}
@@ -196,7 +196,7 @@ int sdp_main_advt_init(void)
sizeof(struct sdpc_advt),
0, SLAB_HWCACHE_ALIGN, NULL,
NULL);
- if (NULL == __sdp_advt_cache)
+ if (!__sdp_advt_cache)
return -ENOMEM;
return 0;
Index: sdp_recv.c
===================================================================
--- sdp_recv.c (revision 1922)
+++ sdp_recv.c (working copy)
@@ -52,7 +52,7 @@ static int _sdp_post_recv_buff(struct sd
* get a buffer
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "failed to allocate buff for recv queue.");
result = -ENOMEM;
goto error;
@@ -75,7 +75,7 @@ static int _sdp_post_recv_buff(struct sd
* post function returns.
*/
result = sdp_buff_q_put_tail(&conn->recv_post, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queuing recv buffer.", result);
goto drop;
}
@@ -97,7 +97,7 @@ static int _sdp_post_recv_buff(struct sd
receive_param.num_sge = 1;
result = ib_post_recv(conn->qp, &receive_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> posting receive buffer",
result);
(void)sdp_buff_q_get_tail(&conn->recv_post);
@@ -134,7 +134,7 @@ static int _sdp_post_rdma_buff(struct sd
* get a reference to the first SrcAvail advertisment.
*/
advt = sdp_advt_q_look(&conn->src_pend);
- if (NULL == advt) {
+ if (!advt) {
result = ENODEV;
goto done;
}
@@ -142,7 +142,7 @@ static int _sdp_post_rdma_buff(struct sd
* get a buffer
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "failed to allocate buff for rdma read.");
result = -ENOMEM;
goto error;
@@ -172,7 +172,7 @@ static int _sdp_post_rdma_buff(struct sd
* If there is no more advertised space move the advertisment to the
* active list, and match the WRID.
*/
- if (!(0 < advt->size))
+ if (advt->size <= 0)
sdp_advt_q_put(&conn->src_actv,
sdp_advt_q_get(&conn->src_pend));
/*
@@ -181,7 +181,7 @@ static int _sdp_post_rdma_buff(struct sd
* post function returns.
*/
result = sdp_desc_q_put_tail(&conn->r_src, (struct sdpc_desc *) buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queuing rdma read.", result);
goto drop;
}
@@ -202,7 +202,7 @@ static int _sdp_post_rdma_buff(struct sd
send_param.num_sge = 1;
result = ib_post_send(conn->qp, &send_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> posting rdma read", result);
(void)sdp_desc_q_get_tail(&conn->r_src);
@@ -243,7 +243,7 @@ static int _sdp_post_rdma_iocb_src(struc
* get a reference to the first SrcAvail advertisment.
*/
advt = sdp_advt_q_look(&conn->src_pend);
- if (NULL == advt) {
+ if (!advt) {
result = ENODEV;
goto done;
}
@@ -254,7 +254,7 @@ static int _sdp_post_rdma_iocb_src(struc
* (final complete RDMA will clear it out.)
*/
iocb = sdp_iocb_q_look(&conn->r_pend);
- if (NULL == iocb) {
+ if (!iocb) {
result = ENODEV;
goto done;
}
@@ -262,7 +262,7 @@ static int _sdp_post_rdma_iocb_src(struc
* register IOCBs physical memory.
*/
result = sdp_iocb_register(iocb, conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> registering IOCB. <%d:%d>",
result, iocb->key, iocb->len);
goto error;
@@ -297,15 +297,15 @@ static int _sdp_post_rdma_iocb_src(struc
* if there is no more advertised space, queue the
* advertisment for completion
*/
- if (!(0 < advt->size))
+ if (advt->size <= 0)
sdp_advt_q_put(&conn->src_actv,
sdp_advt_q_get(&conn->src_pend));
/*
* if there is no more iocb space queue the it for completion
*/
- if (!(0 < iocb->len)) {
+ if (iocb->len <= 0) {
iocb = sdp_iocb_q_get_head(&conn->r_pend);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "read IOCB disappeared. <%d>",
sdp_iocb_q_size(&conn->r_pend));
result = -ENODEV;
@@ -314,7 +314,7 @@ static int _sdp_post_rdma_iocb_src(struc
result = sdp_desc_q_put_tail(&conn->r_src,
(struct sdpc_desc *)iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queuing read IOCB",
result);
(void)sdp_iocb_destroy(iocb);
@@ -338,7 +338,7 @@ static int _sdp_post_rdma_iocb_src(struc
send_param.num_sge = 1;
result = ib_post_send(conn->qp, &send_param, &bad_wr);
- if (0 != result) {
+ if (result) {
sdp_dbg_warn(conn, "Error <%d> posting rdma read", result);
conn->s_wq_size--;
goto error;
@@ -361,7 +361,7 @@ static int _sdp_post_rdma_iocb_snk(struc
/*
* check if sink cancel is pending
*/
- if (0 < (SDP_CONN_F_SNK_CANCEL & conn->flags)) {
+ if (conn->flags & SDP_CONN_F_SNK_CANCEL) {
result = ENODEV;
goto error;
}
@@ -369,7 +369,7 @@ static int _sdp_post_rdma_iocb_snk(struc
* get the pending iocb
*/
iocb = sdp_iocb_q_look(&conn->r_pend);
- if (NULL == iocb) {
+ if (!iocb) {
result = ENODEV;
goto error;
}
@@ -393,7 +393,7 @@ static int _sdp_post_rdma_iocb_snk(struc
result = sdp_iocb_register(iocb, conn);
if (result) {
result = (-EAGAIN == result ? EAGAIN : result);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> registering IOCB",
result);
@@ -408,14 +408,14 @@ static int _sdp_post_rdma_iocb_snk(struc
* queue IOCB
*/
iocb = sdp_iocb_q_get_head(&conn->r_pend);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "read IOCB missing from pending table <%d>",
sdp_iocb_q_size(&conn->r_pend));
goto release;
}
result = sdp_iocb_q_put_tail(&conn->r_snk, iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> queueing active write IOCB",
result);
goto re_q;
@@ -427,7 +427,7 @@ static int _sdp_post_rdma_iocb_snk(struc
iocb->len,
iocb->r_key,
iocb->io_addr);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> sending SnkAvail message",
result);
goto de_q;
@@ -462,15 +462,15 @@ static int _sdp_post_rdma(struct sdp_opt
* the sink advertisment, something to explore, but SrcAvail
* slow start might make that unneccessart?
*/
- if (0 == (SDP_ST_MASK_SEND_OK & conn->state))
+ if (!(SDP_ST_MASK_SEND_OK & conn->state))
return 0;
/*
* loop flushing IOCB RDMAs. Read sources, otherwise post sinks.
*/
- if (0 < sdp_advt_q_size(&conn->src_pend)) {
- if (0 == sdp_desc_q_types_size(&conn->r_src,
+ if (sdp_advt_q_size(&conn->src_pend) > 0) {
+ if (!sdp_desc_q_types_size(&conn->r_src,
SDP_DESC_TYPE_BUFF))
- while (0 == (result = _sdp_post_rdma_iocb_src(conn))) {
+ while (!(result = _sdp_post_rdma_iocb_src(conn))) {
/*
* pass, nothing to do in loop.
*/
@@ -478,7 +478,7 @@ static int _sdp_post_rdma(struct sdp_opt
/*
* check non-zero result
*/
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting RDMA IOCB read",
result);
goto done;
@@ -486,8 +486,8 @@ static int _sdp_post_rdma(struct sdp_opt
/*
* loop posting RDMA reads, if there is room.
*/
- if (0 == sdp_iocb_q_size(&conn->r_pend))
- while (0 < sdp_advt_q_size(&conn->src_pend) &&
+ if (!sdp_iocb_q_size(&conn->r_pend))
+ while (sdp_advt_q_size(&conn->src_pend) > 0 &&
conn->recv_max >
sdp_buff_q_size(&conn->recv_pool) &&
conn->rwin_max > conn->byte_strm) {
@@ -501,22 +501,22 @@ static int _sdp_post_rdma(struct sdp_opt
/*
* check non-zero result
*/
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting RDMA BUFF read",
result);
goto done;
}
} else {
- if (0 < sdp_iocb_q_size(&conn->r_pend) &&
- SDP_MODE_PIPE == conn->recv_mode &&
- 0 == sdp_advt_q_size(&conn->src_actv))
- while (0 == (result = _sdp_post_rdma_iocb_snk(conn))) {
+ if (sdp_iocb_q_size(&conn->r_pend) > 0 &&
+ conn->recv_mode == SDP_MODE_PIPE &&
+ !sdp_advt_q_size(&conn->src_actv))
+ while (!(result = _sdp_post_rdma_iocb_snk(conn))) {
/*
* pass
*/
}
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting RDMA read sink",
result);
goto done;
@@ -539,7 +539,7 @@ int sdp_recv_flush(struct sdp_opt *conn)
/*
* verify that the connection is in a posting state
*/
- if (0 == (SDP_ST_MASK_RCV_POST & conn->state))
+ if (!(SDP_ST_MASK_RCV_POST & conn->state))
return 0;
/*
* loop posting receive buffers onto the queue
@@ -563,7 +563,7 @@ int sdp_recv_flush(struct sdp_opt *conn)
counter = min(counter,
((s32)conn->recv_cq_size - (s32)conn->l_recv_bf));
- while (0 < counter--) {
+ while (counter-- > 0) {
result = _sdp_post_recv_buff(conn);
if (result)
/*
@@ -572,7 +572,7 @@ int sdp_recv_flush(struct sdp_opt *conn)
break;
}
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting recv buff.", result);
goto done;
}
@@ -580,8 +580,8 @@ int sdp_recv_flush(struct sdp_opt *conn)
* If we are in Sink Cancel processing, and the active sink queue has
* been consumed, we can come out of sink processing.
*/
- if (0 < (SDP_CONN_F_SNK_CANCEL & conn->flags) &&
- 0 == sdp_iocb_q_size(&conn->r_snk))
+ if ((conn->flags & SDP_CONN_F_SNK_CANCEL) &&
+ !sdp_iocb_q_size(&conn->r_snk))
conn->flags &= ~SDP_CONN_F_SNK_CANCEL;
/*
* Next the connection should consume RDMA Source advertisments or
@@ -590,7 +590,7 @@ int sdp_recv_flush(struct sdp_opt *conn)
* connection peer.
*/
result = _sdp_post_rdma(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting RDMAs.", result);
goto done;
}
@@ -610,12 +610,12 @@ int sdp_recv_flush(struct sdp_opt *conn)
* 4) The peer has no source or sink advertisments pending. In process
* advertisments generate completions, that's why no ack.
*/
- if ((3 > conn->l_advt_bf &&
+ if ((conn->l_advt_bf < 3 &&
conn->l_recv_bf > conn->l_advt_bf) ||
(SDP_RECV_POST_ACK < (conn->l_recv_bf - conn->l_advt_bf) &&
- 0 == ((u32)conn->snk_recv + (u32)conn->src_recv))) {
+ !((u32)conn->snk_recv + (u32)conn->src_recv))) {
result = sdp_send_ctrl_ack(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting gratuitous ACK",
result);
goto done;
@@ -653,7 +653,7 @@ static int _sdp_read_buff_iocb(struct sd
data = buff->data;
tail = buff->tail;
- buff->tail -= (0 < (SDP_BUFF_F_OOB_PRES & buff->flags)) ? 1 : 0;
+ buff->tail -= (buff->flags & SDP_BUFF_F_OOB_PRES) ? 1 : 0;
/*
* initialize counter to correct page and offset.
*/
@@ -661,12 +661,12 @@ static int _sdp_read_buff_iocb(struct sd
offset = (iocb->post + iocb->page_offset) & (~PAGE_MASK);
while (buff->data < buff->tail &&
- 0 < iocb->len) {
+ iocb->len > 0) {
/*
* map correct page of iocb
*/
addr = __sdp_kmap(iocb->page_array[counter]);
- if (NULL == addr)
+ if (!addr)
break;
copy = min((PAGE_SIZE - offset),
@@ -706,18 +706,18 @@ static int _sdp_recv_buff_iocb_active(st
* Get the IOCB, We'll fill with exactly one
*/
iocb = sdp_iocb_q_get_head(&conn->r_snk);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "Empty active IOCB queue. <%d>",
sdp_iocb_q_size(&conn->r_snk));
return -EPROTO;
}
- SDP_EXPECT((0 < (SDP_IOCB_F_RDMA_W & iocb->flags)));
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_W));
/*
* TODO: need to be checking OOB here.
*/
result = _sdp_read_buff_iocb(iocb, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> data copy <%d:%u> to IOCB",
result, iocb->len,
(unsigned)(buff->tail - buff->data));
@@ -735,7 +735,7 @@ static int _sdp_recv_buff_iocb_active(st
* callback to complete IOCB
*/
result = sdp_iocb_complete(iocb, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
@@ -755,7 +755,7 @@ static int _sdp_recv_buff_iocb_pending(s
* check the IOCB
*/
iocb = sdp_iocb_q_look(&conn->r_pend);
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn, "Empty pending IOCB queue. <%d>",
sdp_iocb_q_size(&conn->r_pend));
return -EPROTO;
@@ -764,7 +764,7 @@ static int _sdp_recv_buff_iocb_pending(s
* TODO: need to be checking OOB here.
*/
result = _sdp_read_buff_iocb(iocb, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> data copy <%d:%u> to IOCB",
result, iocb->len,
(unsigned)(buff->tail - buff->data));
@@ -779,14 +779,14 @@ static int _sdp_recv_buff_iocb_pending(s
* b) the amount of data moved into the IOCB is greater then the
* socket recv low water mark.
*/
- if (0 == iocb->len ||
- (0 == conn->src_recv &&
+ if (!iocb->len ||
+ (!conn->src_recv &&
!(conn->sk->sk_rcvlowat > iocb->post))) {
/*
* complete IOCB
*/
iocb = sdp_iocb_q_get_head(&conn->r_pend);
- SDP_EXPECT((NULL != iocb));
+ SDP_EXPECT((iocb));
SDP_CONN_STAT_READ_INC(conn, iocb->post);
SDP_CONN_STAT_RQ_DEC(conn, iocb->size);
@@ -794,7 +794,7 @@ static int _sdp_recv_buff_iocb_pending(s
* callback to complete IOCB
*/
result = sdp_iocb_complete(iocb, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> completing iocb. <%d>",
result, iocb->key);
}
@@ -817,14 +817,14 @@ int sdp_recv_buff(struct sdp_opt *conn,
* if data is received and the receive half of the connection has been
* closed. This notifies the peer that the data was not received.
*/
- if (0 < (RCV_SHUTDOWN & conn->shutdown)) {
+ if (RCV_SHUTDOWN & conn->shutdown) {
sdp_dbg_warn(conn, "Receive data path closed. <%02x>",
conn->shutdown);
/*
* abort connection (send reset)
*/
result = sdp_wall_abort(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* drop packet
*/
@@ -834,7 +834,7 @@ int sdp_recv_buff(struct sdp_opt *conn,
/*
* oob notification.
*/
- if (0 < (SDP_BUFF_F_OOB_PEND & buff->flags)) {
+ if (buff->flags & SDP_BUFF_F_OOB_PEND) {
conn->rcv_urg_cnt++;
sdp_inet_wake_urg(conn->sk);
}
@@ -842,7 +842,7 @@ int sdp_recv_buff(struct sdp_opt *conn,
* loop while there are available IOCB's, break if there is no
* more data to read
*/
- while (0 < (sdp_iocb_q_size(&conn->r_pend) +
+ while ((sdp_iocb_q_size(&conn->r_pend) +
sdp_iocb_q_size(&conn->r_snk))) {
/*
* if there is OOB data in a buffer, the two functions below
@@ -852,7 +852,7 @@ int sdp_recv_buff(struct sdp_opt *conn,
* will not be consumed until the next AIO buffer is posted,
* or a socket recv (regular or OOB) is called.
*/
- if (0 < (SDP_BUFF_F_OOB_PRES & buff->flags) &&
+ if ((buff->flags & SDP_BUFF_F_OOB_PRES) &&
1 == (buff->tail - buff->data))
break;
/*
@@ -860,17 +860,17 @@ int sdp_recv_buff(struct sdp_opt *conn,
* discarded with exactly one buffer, or process a pending
* IOCB.
*/
- if (0 < conn->snk_sent)
+ if (conn->snk_sent > 0)
result = _sdp_recv_buff_iocb_active(conn, buff);
else
result = _sdp_recv_buff_iocb_pending(conn, buff);
/*
* Check result. Postitive result is data left in the buffer
*/
- if (0 == result)
+ if (!result)
break;
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> processing IOCB. <%d:%d:%d>",
result, conn->snk_sent,
@@ -884,9 +884,9 @@ int sdp_recv_buff(struct sdp_opt *conn,
*/
buffered = buff->tail - buff->data;
- if (0 < buffered) {
+ if (buffered > 0) {
result = sdp_buff_q_put_tail(&conn->recv_pool, buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
return buffered;
@@ -906,7 +906,7 @@ static int _sdp_read_src_lookup(struct s
struct sdpc_iocb *iocb = (struct sdpc_iocb *) element;
struct kiocb *req = (struct kiocb *)arg;
- if (SDP_DESC_TYPE_IOCB == element->type && iocb->key == req->ki_key)
+ if (element->type == SDP_DESC_TYPE_IOCB && iocb->key == req->ki_key)
return 0;
else
return -ERANGE;
@@ -926,7 +926,7 @@ static int _sdp_inet_read_cancel(struct
sdp_dbg_ctrl(NULL, "Cancel Read IOCB. user <%d> key <%d> flag <%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
- if (NULL == si || NULL == si->sock || NULL == si->sock->sk) {
+ if (!si || !si->sock || !si->sock->sk) {
sdp_warn("Cancel empty read IOCB. users <%d> flags <%d:%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
result = -EFAULT;
@@ -946,7 +946,7 @@ static int _sdp_inet_read_cancel(struct
* whether this is a read or write.
*/
iocb = sdp_iocb_q_lookup(&conn->r_pend, req->ki_key);
- if (NULL != iocb) {
+ if (iocb) {
/*
* always remove the IOCB. If active, then place it into
* the correct active queue. Inactive empty IOCBs can be
@@ -954,19 +954,19 @@ static int _sdp_inet_read_cancel(struct
*/
sdp_iocb_q_remove(iocb);
- if (0 == (SDP_IOCB_F_ACTIVE & iocb->flags)) {
- if (0 < iocb->post) {
+ if (!(iocb->flags & SDP_IOCB_F_ACTIVE)) {
+ if (iocb->post > 0) {
/*
* callback to complete IOCB, or drop reference
*/
result = sdp_iocb_complete(iocb, 0);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = -EAGAIN;
}
else {
result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
/*
* completion reference
*/
@@ -978,16 +978,16 @@ static int _sdp_inet_read_cancel(struct
goto unlock;
}
- if (0 < (SDP_IOCB_F_RDMA_W & iocb->flags)) {
+ if (iocb->flags & SDP_IOCB_F_RDMA_W) {
result = sdp_iocb_q_put_tail(&conn->r_snk, iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
else {
- SDP_EXPECT((SDP_IOCB_F_RDMA_R & iocb->flags));
+ SDP_EXPECT((iocb->flags & SDP_IOCB_F_RDMA_R));
result = sdp_desc_q_put_tail(&conn->r_src,
(struct sdpc_desc *)iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
/*
@@ -997,7 +997,7 @@ static int _sdp_inet_read_cancel(struct
iocb = (struct sdpc_iocb *)sdp_desc_q_lookup(&conn->r_src,
_sdp_read_src_lookup,
req);
- if (NULL != iocb) {
+ if (iocb) {
iocb->flags |= SDP_IOCB_F_CANCEL;
result = -EAGAIN;
@@ -1008,17 +1008,17 @@ static int _sdp_inet_read_cancel(struct
* needs to be issued.
*/
iocb = sdp_iocb_q_lookup(&conn->r_snk, req->ki_key);
- if (NULL != iocb) {
+ if (iocb) {
/*
* Unfortunetly there is only a course grain cancel in SDP, so
* we have to cancel everything. This is OKish since it usually
* only happens at connection termination, and the remaining
* source probably will get cancel requests as well.
*/
- if (0 == (SDP_CONN_F_SNK_CANCEL & conn->flags)) {
+ if (!(conn->flags & SDP_CONN_F_SNK_CANCEL)) {
result = sdp_send_ctrl_snk_cancel(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
conn->flags |= SDP_CONN_F_SNK_CANCEL;
}
@@ -1060,13 +1060,13 @@ static int _sdp_inet_recv_urg_trav(struc
u8 *value = (u8 *) arg;
u8 update;
- if (0 < (SDP_BUFF_F_OOB_PRES & buff->flags)) {
+ if (buff->flags & SDP_BUFF_F_OOB_PRES) {
SDP_EXPECT((buff->tail > buff->data));
update = *value;
*value = *(u8 *) (buff->tail - 1);
- if (0 < update) {
+ if (update > 0) {
buff->tail--;
buff->flags &= ~SDP_BUFF_F_OOB_PRES;
}
@@ -1092,35 +1092,35 @@ static int _sdp_inet_recv_urg(struct soc
conn = SDP_GET_CONN(sk);
- if (sock_flag(sk, SOCK_URGINLINE) || 0 == conn->rcv_urg_cnt)
+ if (sock_flag(sk, SOCK_URGINLINE) || !conn->rcv_urg_cnt)
return -EINVAL;
/*
* don't cosume data on PEEK, but do consume data on TRUNC
*/
#if 0
- value = (0 < (MSG_PEEK & flags)) || (0 == size) ? 0 : 1;
+ value = ((MSG_PEEK & flags) || !size) ? 0 : 1;
#else
- value = (0 < (MSG_PEEK & flags)) ? 0 : 1;
+ value = (MSG_PEEK & flags) ? 0 : 1;
#endif
result = sdp_buff_q_trav_head(&conn->recv_pool,
_sdp_inet_recv_urg_trav,
(void *)&value);
- if (-ERANGE != result) {
- result = (0 != result) ? result : -EAGAIN;
+ if (result != -ERANGE) {
+ result = result ? result : -EAGAIN;
goto done;
}
msg->msg_flags |= MSG_OOB;
- if (0 < size) {
+ if (size > 0) {
result = memcpy_toiovec(msg->msg_iov, &value, 1);
- if (0 != result)
+ if (result)
goto done;
/*
* clear urgent pointer on consumption
*/
- if (0 == (MSG_PEEK & flags)) {
+ if (!(MSG_PEEK & flags)) {
conn->rcv_urg_cnt -= 1;
conn->byte_strm -= 1;
@@ -1133,12 +1133,12 @@ static int _sdp_inet_recv_urg(struct soc
buff = sdp_buff_q_fetch(&conn->recv_pool,
_sdp_inet_recv_urg_test,
(void *)0);
- if (NULL != buff) {
+ if (buff) {
result = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
result = sdp_recv_flush(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
result = 1;
@@ -1191,28 +1191,28 @@ int sdp_inet_recv(struct kiocb *req,
/*
* TODO: unhandled, but need to be handled.
*/
- if (0 < (MSG_TRUNC & flags))
+ if (MSG_TRUNC & flags)
return -EOPNOTSUPP;
- if (0 < (MSG_PEEK & flags)) {
+ if (MSG_PEEK & flags) {
sdp_buff_q_init(&peek_queue);
msg->msg_flags |= MSG_PEEK;
}
sdp_conn_lock(conn);
- if (SDP_SOCK_ST_LISTEN == conn->istate ||
- SDP_SOCK_ST_CLOSED == conn->istate) {
+ if (conn->istate == SDP_SOCK_ST_LISTEN ||
+ conn->istate == SDP_SOCK_ST_CLOSED) {
result = -ENOTCONN;
goto done;
}
/*
* process urgent data
*/
- if (0 < (MSG_OOB & flags)) {
+ if (MSG_OOB & flags) {
result = _sdp_inet_recv_urg(sk, msg, size, flags);
- copied = (0 < result) ? result : 0;
- result = (0 < result) ? 0 : result;
+ copied = (result > 0) ? result : 0;
+ result = (result > 0) ? 0 : result;
goto done;
}
/*
@@ -1232,7 +1232,7 @@ int sdp_inet_recv(struct kiocb *req,
length = buff->tail - buff->data;
update = 0;
- if (SDP_BUFF_F_OOB_PRES & buff->flags) {
+ if (buff->flags & SDP_BUFF_F_OOB_PRES) {
/*
* if data has already been read, and the
* next byte is the urgent byte, reading
@@ -1246,7 +1246,7 @@ int sdp_inet_recv(struct kiocb *req,
if (1 < length)
length--;
else {
- if (0 < copied) {
+ if (copied > 0) {
/*
* update such that we pass
* through the copy phase,
@@ -1275,22 +1275,22 @@ int sdp_inet_recv(struct kiocb *req,
copy = min((size_t) (size - copied), length);
- if (0 < copy) {
+ if (copy > 0) {
#ifndef _SDP_DATA_PATH_NULL
result = memcpy_toiovec(msg->msg_iov,
buff->data,
copy);
- if (0 > result) {
+ if (result < 0) {
expect =
sdp_buff_q_put_head(&conn->
recv_pool,
buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
goto done;
}
#endif
- update = (0 < (MSG_PEEK & flags)) ? 0 : copy;
+ update = (MSG_PEEK & flags) ? 0 : copy;
}
SDP_CONN_STAT_RECV_INC(conn, update);
@@ -1299,10 +1299,10 @@ int sdp_inet_recv(struct kiocb *req,
buff->data += update;
copied += copy;
- if (0 < (buff->tail - buff->data)) {
+ if ((buff->tail - buff->data) > 0) {
expect = sdp_buff_q_put_head(&conn->recv_pool,
buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
/*
* always break, PEEK and OOB together could
* throw us into a loop without a forced
@@ -1317,9 +1317,9 @@ int sdp_inet_recv(struct kiocb *req,
if (MSG_PEEK & flags) {
expect = sdp_buff_q_put_head(&peek_queue,
buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
} else {
- if (SDP_BUFF_F_OOB_PRES & buff->flags)
+ if (buff->flags & SDP_BUFF_F_OOB_PRES)
conn->rcv_urg_cnt -= 1;
/*
* create a link of buffers which
@@ -1337,7 +1337,7 @@ int sdp_inet_recv(struct kiocb *req,
*/
if (SDP_RECV_POST_FREQ < ++ack) {
result = sdp_recv_flush(conn);
- if (0 > result)
+ if (result < 0)
goto done;
ack = 0;
@@ -1348,7 +1348,7 @@ int sdp_inet_recv(struct kiocb *req,
* urgent data needs to break up the data stream, regardless
* of low water mark, or whether there is room in the buffer.
*/
- if (0 < oob) {
+ if (oob > 0) {
result = 0;
break;
}
@@ -1365,7 +1365,7 @@ int sdp_inet_recv(struct kiocb *req,
*/
sdp_conn_relock(conn);
- if (0 < sdp_buff_q_size(&conn->recv_pool))
+ if (sdp_buff_q_size(&conn->recv_pool) > 0)
continue;
}
/*
@@ -1376,7 +1376,7 @@ int sdp_inet_recv(struct kiocb *req,
* data is pending and accessible.
*/
if (!(copied < low_water) &&
- 0 == conn->src_recv) {
+ !conn->src_recv) {
#if 0 /* performance cheat. LM */
if (!(conn->snk_zthresh > size)) {
@@ -1384,7 +1384,7 @@ int sdp_inet_recv(struct kiocb *req,
result = sdp_send_ctrl_snk_avail(conn,
0, 0, 0);
- if (0 > result) {
+ if (result < 0) {
/*
* since the message did not go out,
* back out the non_discard counter
@@ -1399,23 +1399,23 @@ int sdp_inet_recv(struct kiocb *req,
* check connection errors, and then wait for more data.
* check status. POSIX 1003.1g order.
*/
- if (0 != SDP_CONN_GET_ERR(conn)) {
- result = (0 < copied) ? 0 : sdp_conn_error(conn);
+ if (SDP_CONN_GET_ERR(conn)) {
+ result = (copied > 0) ? 0 : sdp_conn_error(conn);
break;
}
- if (0 < (RCV_SHUTDOWN & conn->shutdown)) {
+ if (RCV_SHUTDOWN & conn->shutdown) {
result = 0;
break;
}
- if (SDP_SOCK_ST_ERROR == conn->istate) {
+ if (conn->istate == SDP_SOCK_ST_ERROR) {
result = -EPROTO; /* error should always be
set, but just in case */
break;
}
- if (0 == timeout) {
+ if (!timeout) {
result = -EAGAIN;
break;
}
@@ -1430,7 +1430,7 @@ int sdp_inet_recv(struct kiocb *req,
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- if (0 == sdp_buff_q_size(&conn->recv_pool)) {
+ if (!sdp_buff_q_size(&conn->recv_pool)) {
sdp_conn_unlock(conn);
timeout = schedule_timeout(timeout);
sdp_conn_lock(conn);
@@ -1443,7 +1443,7 @@ int sdp_inet_recv(struct kiocb *req,
* check signal pending
*/
if (signal_pending(current)) {
- result = ((0 < timeout) ?
+ result = ((timeout > 0) ?
sock_intr_errno(timeout) : -EAGAIN);
break;
}
@@ -1452,7 +1452,7 @@ int sdp_inet_recv(struct kiocb *req,
* create IOCB with remaining space
*/
iocb = sdp_iocb_create();
- if (NULL == iocb) {
+ if (!iocb) {
sdp_dbg_warn(conn,
"Error allocating IOCB <%Zu:%d>",
size, copied);
@@ -1470,7 +1470,7 @@ int sdp_inet_recv(struct kiocb *req,
req->ki_cancel = _sdp_inet_read_cancel;
result = sdp_iocb_lock(iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> IOCB lock <%Zu:%d>",
result, size, copied);
@@ -1482,7 +1482,7 @@ int sdp_inet_recv(struct kiocb *req,
SDP_CONN_STAT_RQ_INC(conn, iocb->size);
result = sdp_iocb_q_put_tail(&conn->r_pend, iocb);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> IOCB queue <%Zu:%d>",
result, size, copied);
@@ -1503,9 +1503,9 @@ done:
/*
* acknowledge moved data
*/
- if (0 < ack) {
+ if (ack > 0) {
expect = sdp_recv_flush(conn);
- if (0 > expect)
+ if (expect < 0)
sdp_dbg_warn(conn, "Error <%d> flushing recv queue.",
expect);
}
@@ -1514,13 +1514,13 @@ done:
/*
* return any peeked buffers to the recv queue, in the correct order.
*/
- if (0 < (MSG_PEEK & flags)) {
- while (NULL != (buff = sdp_buff_q_get_tail(&peek_queue))) {
+ if (MSG_PEEK & flags) {
+ while ((buff = sdp_buff_q_get_tail(&peek_queue))) {
expect = sdp_buff_q_put_head(&conn->recv_pool, buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
}
}
sdp_conn_unlock(conn);
- return ((0 < copied) ? copied : result);
+ return ((copied > 0) ? copied : result);
}
Index: sdp_wall.c
===================================================================
--- sdp_wall.c (revision 1922)
+++ sdp_wall.c (working copy)
@@ -62,11 +62,11 @@ int sdp_wall_send_close(struct sdp_opt *
* clear out the sent HelloAck message
*/
buff = sdp_buff_q_get_head(&conn->send_post);
- if (NULL == buff)
+ if (!buff)
sdp_dbg_warn(conn, "Error, hello ack missing.");
else {
result = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
/*
* fall through
@@ -86,7 +86,7 @@ int sdp_wall_send_close(struct sdp_opt *
SDP_CONN_ST_SET(conn, SDP_CONN_ST_DIS_PEND_1);
result = sdp_send_ctrl_disconnect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> send disconnect request",
result);
@@ -108,7 +108,7 @@ error:
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ERROR_STRM);
result = sdp_cm_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> CM disconnect send", result);
return result;
@@ -139,7 +139,7 @@ int sdp_wall_send_closing(struct sdp_opt
SDP_CONN_ST_SET(conn, SDP_CONN_ST_DIS_PEND_2);
result = sdp_send_ctrl_disconnect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> send disconnect request",
result);
@@ -160,7 +160,7 @@ error:
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ERROR_STRM);
result = sdp_cm_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> CM disconnect send", result);
return result;
@@ -193,7 +193,7 @@ int sdp_wall_send_abort(struct sdp_opt *
* post abort
*/
result = sdp_send_ctrl_abort(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> send abort request",
result);
goto error;
@@ -261,7 +261,7 @@ error:
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ERROR_STRM);
result = sdp_cm_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> CM disconnect send", result);
return 0;
@@ -347,7 +347,7 @@ int sdp_wall_recv_failed(struct sdp_opt
* the connection has failed, move to error, and notify anyone
* waiting of the state change.
*/
- SDP_EXPECT((SDP_SOCK_ST_ACCEPTED == conn->istate));
+ SDP_EXPECT((conn->istate == SDP_SOCK_ST_ACCEPTED));
switch (conn->istate) {
default:
@@ -420,7 +420,7 @@ int sdp_wall_recv_closing(struct sdp_opt
/*
* change state, finalize the close, and wake the closer.
*/
- SDP_EXPECT((SDP_SOCK_ST_DISCONNECT == conn->istate));
+ SDP_EXPECT((conn->istate == SDP_SOCK_ST_DISCONNECT));
conn->send_buf = 0;
conn->istate = SDP_SOCK_ST_CLOSED;
@@ -502,7 +502,7 @@ int sdp_wall_recv_drop(struct sdp_opt *c
* pull the listen sockets accept queue.
*/
result = sdp_inet_accept_q_remove(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn,
"Error <%d> removing from accept queue.",
result);
@@ -555,11 +555,11 @@ int sdp_wall_abort(struct sdp_opt *conn)
* notify both halves of the wall that the connection is being aborted.
*/
result = sdp_wall_recv_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> recving abort request", result);
/* if */
result = sdp_wall_send_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> sending abort request", result);
return 0;
Index: sdp_conn.h
===================================================================
--- sdp_conn.h (revision 1922)
+++ sdp_conn.h (working copy)
@@ -492,8 +492,8 @@ static inline void sdp_conn_unlock(struc
unsigned long flags;
spin_lock_irqsave(&conn->lock.slock, flags);
- if (0 < (SDP_CONN_F_MASK_EVENT & conn->flags) &&
- 0 < (SDP_ST_MASK_EVENTS & conn->state)) {
+ if ((conn->flags & SDP_CONN_F_MASK_EVENT) &&
+ (SDP_ST_MASK_EVENTS & conn->state)) {
sdp_conn_internal_unlock(conn);
}
Index: sdp_proc.c
===================================================================
--- sdp_proc.c (revision 1922)
+++ sdp_proc.c (working copy)
@@ -56,15 +56,15 @@ static int _sdp_proc_read_parse(char *pa
int size;
#if 0
- if (NULL == *start && 0 != offset) {
+ if (!*start && offset) {
return 0; /* I'm not sure why this always gets
called twice... */
}
#endif
size = sub_entry->read(page, count, offset, &end_index);
- if (0 < size) {
- if (0 < end_index) {
+ if (size > 0) {
+ if (end_index > 0) {
*start = (char *)end_index;
*eof = 0;
} else {
@@ -141,7 +141,7 @@ int sdp_main_proc_cleanup(void)
*/
for (counter = 0; counter < SDP_PROC_ENTRIES; counter++) {
sub_entry = &_file_entry_list[counter];
- if (NULL != sub_entry->entry) {
+ if (sub_entry->entry) {
remove_proc_entry(sub_entry->name, _dir_root);
sub_entry->entry = NULL;
}
@@ -177,7 +177,7 @@ int sdp_main_proc_init(void)
return -EFAULT;
}
- if (NULL != _dir_root) {
+ if (_dir_root) {
sdp_warn("/proc already initialized!");
return -EINVAL;
}
@@ -185,7 +185,7 @@ int sdp_main_proc_init(void)
* create a gateway root, and main directories
*/
_dir_root = proc_mkdir(_dir_name_root, proc_net);
- if (NULL == _dir_root) {
+ if (!_dir_root) {
sdp_warn("Failed to create <%s> proc entry.",
_dir_name_root);
return -EINVAL;
@@ -203,7 +203,7 @@ int sdp_main_proc_init(void)
sub_entry->entry = create_proc_entry(sub_entry->name,
S_IRUGO | S_IWUGO,
_dir_root);
- if (NULL == sub_entry->entry) {
+ if (!sub_entry->entry) {
sdp_warn("Failed to create <%s> framework proc entry.",
sub_entry->name);
result = -EINVAL;
Index: sdp_pass.c
===================================================================
--- sdp_pass.c (revision 1922)
+++ sdp_pass.c (working copy)
@@ -62,7 +62,7 @@ static int _sdp_cm_accept(struct sdp_opt
* (don't need to worry about header space reservation on sends)
*/
buff = sdp_buff_pool_get();
- if (NULL == buff) {
+ if (!buff) {
sdp_dbg_warn(conn, "Failed to allocate buff for Hello Ack.");
result = -ENOMEM;
goto error;
@@ -97,12 +97,12 @@ static int _sdp_cm_accept(struct sdp_opt
* save message
*/
result = sdp_buff_q_put(&conn->send_post, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> buffering hello ack packet.",
result);
expect = sdp_buff_pool_put(buff);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
goto error;
}
@@ -110,7 +110,7 @@ static int _sdp_cm_accept(struct sdp_opt
* modify QP. INIT->RTR
*/
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
- if (NULL == qp_attr) {
+ if (!qp_attr) {
sdp_dbg_warn(conn, "Failed to allocate QP attribute.");
result = -ENOMEM;
goto error;
@@ -147,7 +147,7 @@ static int _sdp_cm_accept(struct sdp_opt
* Post receive buffers for this connection
*/
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing receive queue",
result);
goto error;
@@ -170,7 +170,7 @@ static int _sdp_cm_accept(struct sdp_opt
param.rnr_retry_count = SDP_CM_PARAM_RNR_RETRY;
result = ib_send_cm_rep(conn->cm_id, ¶m);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> CM accept request.", result);
goto error;
}
@@ -198,7 +198,7 @@ static int _sdp_cm_listen_lookup(struct
* first find a listening connection
*/
listen_conn = sdp_inet_listen_lookup(conn->src_addr, conn->src_port);
- if (NULL == listen_conn) {
+ if (!listen_conn) {
/*
* no connection, reject
*/
@@ -224,7 +224,7 @@ static int _sdp_cm_listen_lookup(struct
}
result = sdp_inet_port_inherit(listen_conn, conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(listen_conn, "Error <%d> listen port inherit.",
result);
result = -EFAULT;
@@ -270,7 +270,7 @@ static int _sdp_cm_listen_lookup(struct
* initiate a CM response message.
*/
result = _sdp_cm_accept(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> CM connect accept", result);
goto locked_err;
}
@@ -278,7 +278,7 @@ static int _sdp_cm_listen_lookup(struct
* place connection into the listen connections accept queue.
*/
result = sdp_inet_accept_q_put(listen_conn, conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"Error <%d> adding socket to accept queue",
result);
@@ -322,7 +322,7 @@ static int _sdp_cm_hello_check(struct sd
return -EINVAL;
}
- if (!(0 < msg_hello->hh.max_adv)) {
+ if (msg_hello->hh.max_adv <= 0) {
sdp_dbg_warn(NULL, "hello msg, bad zcopy count <%d>",
msg_hello->hh.max_adv);
return -EINVAL;
@@ -383,7 +383,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
* check Hello Header, to determine if we want the connection.
*/
result = _sdp_cm_hello_check(msg_hello);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(NULL, "Error <%d> validating hello msg. <%08x>",
result, cm_id->local_id);
goto done;
@@ -392,7 +392,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
* Create a connection for this request.
*/
conn = sdp_conn_alloc(GFP_KERNEL); /* CM sk reference */
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(NULL, "Failed to allocate connection. <%08x>",
cm_id->local_id);
result = -ENOMEM;
@@ -440,7 +440,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
event->param.req_rcvd.device,
event->param.req_rcvd.port,
event->param.req_rcvd.primary_path->pkey);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> binding connection to HCA/port",
result);
goto error;
@@ -458,7 +458,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
* into listeners accept queue.
*/
result = _sdp_cm_listen_lookup(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> matching listen socket queue",
result);
goto error;
Index: sdp_sent.c
===================================================================
--- sdp_sent.c (revision 1922)
+++ sdp_sent.c (working copy)
@@ -66,7 +66,7 @@ static int _sdp_sent_disconnect(struct s
* Begin IB/CM disconnect
*/
result = sdp_cm_disconnect(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> posting CM disconnect",
result);
goto error;
@@ -84,7 +84,7 @@ static int _sdp_sent_disconnect(struct s
* acknowledge disconnect to framework
*/
result = sdp_wall_recv_closing(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> closing connection.",
result);
goto error;
@@ -97,7 +97,7 @@ static int _sdp_sent_disconnect(struct s
* if the remote DREQ was already received, but unprocessed, do
* not treat it as an error
*/
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> CM disconnect", result);
break;
@@ -123,11 +123,11 @@ static int _sdp_sent_abort(struct sdp_op
* The gateway interface should be in error state, initiate CM
* disconnect.
*/
- SDP_EXPECT((SDP_CONN_ST_ERROR_STRM == conn->state));
+ SDP_EXPECT((conn->state == SDP_CONN_ST_ERROR_STRM));
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ERROR_STRM);
result = sdp_cm_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> CM disconnect", result);
return result;
@@ -356,17 +356,17 @@ int sdp_event_send(struct sdp_opt *conn,
/*
* get buffer.
*/
- while (NULL != (buff = sdp_buff_q_get_head(&conn->send_post))) {
+ while ((buff = sdp_buff_q_get_head(&conn->send_post))) {
/*
* sanity checks
*/
- if (NULL == buff->bsdh_hdr) {
+ if (!buff->bsdh_hdr) {
sdp_dbg_warn(conn, "Send header is missing?!");
result = -ENODATA;
goto drop;
}
/* check WRID taking into account wrap around */
- if (0 > (s64)(comp->wr_id - buff->wrid)) {
+ if (((s64)(comp->wr_id - buff->wrid)) < 0) {
/*
* error
*/
@@ -403,7 +403,7 @@ int sdp_event_send(struct sdp_opt *conn,
offset = buff->bsdh_hdr->mid & 0x1F;
if (!(offset < SDP_MSG_EVENT_TABLE_SIZE) ||
- NULL == send_event_funcs[offset]) {
+ !send_event_funcs[offset]) {
sdp_dbg_warn(conn,
"Send complete unknown MID <%d>",
buff->bsdh_hdr->mid);
@@ -415,7 +415,7 @@ int sdp_event_send(struct sdp_opt *conn,
dispatch_func = send_event_funcs[offset];
result = dispatch_func(conn, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Sent dispatch error. <%d>",
result);
goto drop;
@@ -428,7 +428,7 @@ int sdp_event_send(struct sdp_opt *conn,
*/
conn->s_wq_size--;
- if (0 < SDP_BUFF_F_GET_UNSIG(buff))
+ if (SDP_BUFF_F_GET_UNSIG(buff) > 0)
conn->send_usig--;
/*
* create a link of buffers which will be returned to
@@ -444,9 +444,9 @@ int sdp_event_send(struct sdp_opt *conn,
}
result = sdp_buff_pool_chain_put(head, free_count);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
- if (!(0 < free_count) || 0 > conn->send_usig) {
+ if (free_count <= 0 || conn->send_usig < 0) {
sdp_dbg_warn(conn,
"Send processing mismatch. <%llu:%llu:%d:%d>",
(unsigned long long)comp->wr_id,
@@ -459,7 +459,7 @@ int sdp_event_send(struct sdp_opt *conn,
* Flush queued send data into the post queue if there is room.
*/
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing send queue.", result);
goto done;
}
Index: sdp_iocb.c
===================================================================
--- sdp_iocb.c (revision 1922)
+++ sdp_iocb.c (working copy)
@@ -52,7 +52,7 @@ static int _sdp_iocb_unlock(struct sdpc_
struct vm_area_struct *vma;
vma = find_vma(iocb->mm, (iocb->addr & PAGE_MASK));
- if (NULL == vma)
+ if (!vma)
sdp_warn("No VMA for IOCB <%lx:%Zu> unlock",
iocb->addr, iocb->size);
@@ -104,7 +104,7 @@ int sdp_iocb_unlock(struct sdpc_iocb *io
/*
* check if IOCB is locked.
*/
- if (0 == (SDP_IOCB_F_LOCKED & iocb->flags))
+ if (!(iocb->flags & SDP_IOCB_F_LOCKED))
return 0;
/*
* spin lock since this could be from interrupt context.
@@ -145,7 +145,7 @@ static int _sdp_iocb_page_save(struct sd
pte_t *ptep;
pte_t pte;
- if (!(0 < iocb->page_count) || !(0 < iocb->size) || 0 == iocb->addr)
+ if (iocb->page_count <= 0 || iocb->size <= 0 || !iocb->addr)
return -EINVAL;
/*
* create array to hold page value which are later needed to register
@@ -153,12 +153,12 @@ static int _sdp_iocb_page_save(struct sd
*/
iocb->addr_array = kmalloc((sizeof(u64) * iocb->page_count),
GFP_KERNEL);
- if (NULL == iocb->addr_array)
+ if (!iocb->addr_array)
goto err_addr;
iocb->page_array = kmalloc((sizeof(struct page *) * iocb->page_count),
GFP_KERNEL);
- if (NULL == iocb->page_array)
+ if (!iocb->page_array)
goto err_page;
/*
* iocb->addr - buffer start address
@@ -177,7 +177,7 @@ static int _sdp_iocb_page_save(struct sd
spin_lock(&iocb->mm->page_table_lock);
for (counter = 0;
- 0 < size;
+ size > 0;
counter++, addr += PAGE_SIZE, size -= PAGE_SIZE) {
pgd = pgd_offset_gate(iocb->mm, addr);
if (!pgd || pgd_none(*pgd))
@@ -280,7 +280,7 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
* them do not copy, reference counting, and saving them.
*/
vma = find_vma(iocb->mm, addr);
- if (NULL == vma)
+ if (!vma)
/*
* sanity check.
*/
@@ -290,7 +290,7 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
while (vma) {
spin_lock(&iocb->mm->page_table_lock);
- if (0 == (VM_LOCKED & vma->vm_flags))
+ if (!(VM_LOCKED & vma->vm_flags))
sdp_warn("Unlocked vma! <%08lx>", vma->vm_flags);
if (PAGE_SIZE < (unsigned long)vma->vm_private_data)
@@ -367,13 +367,13 @@ static int _sdp_mem_lock_init(void)
sdp_dbg_init("Memory Locking initialization.");
kallsyms = filp_open("/proc/kallsyms", O_RDONLY, 0);
- if (NULL == kallsyms) {
+ if (!kallsyms) {
sdp_warn("Failed to open /proc/kallsyms");
goto done;
}
seq = (struct seq_file *)kallsyms->private_data;
- if (NULL == seq) {
+ if (!seq) {
sdp_warn("Failed to fetch sequential file.");
goto err_close;
}
@@ -381,10 +381,10 @@ static int _sdp_mem_lock_init(void)
for (iter = seq->op->start(seq, &pos);
iter != NULL;
iter = seq->op->next(seq, iter, &pos))
- if (0 == strcmp(iter->name, "do_mlock"))
+ if (!strcmp(iter->name, "do_mlock"))
_mlock_ptr = (do_mlock_ptr_t)iter->value;
- if (NULL == _mlock_ptr)
+ if (!_mlock_ptr)
sdp_warn("Failed to find lock pointer.");
else
ret = 0;
@@ -423,7 +423,7 @@ int sdp_iocb_register(struct sdpc_iocb *
/*
* register only once.
*/
- if (SDP_IOCB_F_REG & iocb->flags)
+ if (iocb->flags & SDP_IOCB_F_REG)
return 0;
/*
* prime io address with physical address of first byte?
@@ -439,7 +439,7 @@ int sdp_iocb_register(struct sdpc_iocb *
if (IS_ERR(iocb->mem)) {
result = (int)PTR_ERR(iocb->mem);
- if (-EAGAIN != result)
+ if (result != -EAGAIN)
sdp_dbg_err("Error <%d> fmr_pool_map_phys <%d:%d:%d>",
result,
iocb->len,
@@ -471,11 +471,11 @@ int sdp_iocb_release(struct sdpc_iocb *i
{
int result;
- if (0 == (SDP_IOCB_F_REG & iocb->flags))
+ if (!(iocb->flags & SDP_IOCB_F_REG))
return 0;
result = ib_fmr_pool_unmap(iocb->mem);
- if (0 > result)
+ if (result < 0)
sdp_dbg_err("Error <%d> releasing IOCB <%d> memory <%ld>",
result, iocb->key, iocb->addr);
@@ -496,20 +496,20 @@ static void _sdp_iocb_complete(void *arg
* release memory
*/
result = sdp_iocb_release(iocb);
- if (0 > result)
+ if (result < 0)
sdp_dbg_err("Error <%d> releasing IOCB <%d> memory <%ld>",
result, iocb->key, iocb->addr);
/*
* unlock now, after aio_complete the mm reference will be released.
*/
result = sdp_iocb_unlock(iocb);
- if (0 > result)
+ if (result < 0)
sdp_dbg_err("Error <%d> unlocking IOCB <%d memory <%ld>>",
result, iocb->key, iocb->addr);
/*
* callback to complete IOCB
*/
- value = (0 < iocb->post) ? iocb->post : iocb->status;
+ value = (iocb->post > 0) ? iocb->post : iocb->status;
sdp_dbg_data(NULL, "IOCB complete. <%d:%d:%08lx> value <%ld>",
iocb->req->ki_users, iocb->req->ki_key,
@@ -523,7 +523,7 @@ static void _sdp_iocb_complete(void *arg
* delete IOCB
*/
result = sdp_iocb_destroy(iocb);
- if (0 > result)
+ if (result < 0)
sdp_dbg_err("Error <%d> deleting IOCB <%d> of status <%Zu>",
result, iocb->key, iocb->status);
}
@@ -602,7 +602,7 @@ struct sdpc_iocb *sdp_iocb_create(void)
struct sdpc_iocb *iocb;
iocb = kmem_cache_alloc(__sdp_iocb_cache, SLAB_KERNEL);
- if (NULL != iocb) {
+ if (iocb) {
memset(iocb, 0, sizeof(struct sdpc_iocb));
/*
* non-zero initialization
@@ -620,10 +620,10 @@ struct sdpc_iocb *sdp_iocb_create(void)
*/
int sdp_iocb_destroy(struct sdpc_iocb *iocb)
{
- if (NULL == iocb)
+ if (!iocb)
return -EINVAL;
- if (NULL != iocb->next || NULL != iocb->prev)
+ if (iocb->next || iocb->prev)
return -EACCES;
/*
* release iocb registered memory
@@ -636,10 +636,10 @@ int sdp_iocb_destroy(struct sdpc_iocb *i
/*
* array dealloc
*/
- if (NULL != iocb->page_array)
+ if (iocb->page_array)
kfree(iocb->page_array);
- if (NULL != iocb->addr_array)
+ if (iocb->addr_array)
kfree(iocb->addr_array);
/*
* clear IOCB to check for usage after free...
@@ -672,7 +672,7 @@ static struct sdpc_iocb *_sdp_iocb_q_get
struct sdpc_iocb *next;
struct sdpc_iocb *prev;
- if (NULL == table->head)
+ if (!table->head)
return NULL;
if (head)
@@ -710,7 +710,7 @@ static int _sdp_iocb_q_put(struct sdpc_i
struct sdpc_iocb *next;
struct sdpc_iocb *prev;
- if (NULL == table->head) {
+ if (!table->head) {
iocb->next = iocb;
iocb->prev = iocb;
table->head = iocb;
@@ -787,7 +787,7 @@ void sdp_iocb_q_cancel(struct sdpc_iocb_
counter < total; counter++) {
next = iocb->next;
- if (0 < (mask & iocb->flags) || SDP_IOCB_F_ALL == mask) {
+ if ((mask & iocb->flags) || mask == SDP_IOCB_F_ALL) {
sdp_dbg_err("IOCB <%d> cancel <%Zu> flag <%04x> "
"size <%Zu:%d:%d>",
iocb->key, comp, iocb->flags, iocb->size,
@@ -796,7 +796,7 @@ void sdp_iocb_q_cancel(struct sdpc_iocb_
sdp_iocb_q_remove(iocb);
result = sdp_iocb_complete(iocb, comp);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
iocb = next;
@@ -823,9 +823,9 @@ void sdp_iocb_q_clear(struct sdpc_iocb_q
/*
* drain the table of any objects
*/
- while (NULL != (iocb = sdp_iocb_q_get_head(table))) {
+ while ((iocb = sdp_iocb_q_get_head(table))) {
result = sdp_iocb_destroy(iocb);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
@@ -845,14 +845,14 @@ int sdp_main_iocb_init(void)
* initialize locking code.
*/
result = _sdp_mem_lock_init();
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> initializing memory locking.", result);
return result;
}
/*
* initialize the caches only once.
*/
- if (NULL != __sdp_iocb_cache) {
+ if (__sdp_iocb_cache) {
sdp_warn("IOCB caches already initialized.");
return -EINVAL;
}
@@ -861,7 +861,7 @@ int sdp_main_iocb_init(void)
sizeof(struct sdpc_iocb),
0, SLAB_HWCACHE_ALIGN, NULL,
NULL);
- if (NULL == __sdp_iocb_cache) {
+ if (!__sdp_iocb_cache) {
result = -ENOMEM;
goto error_iocb_c;
}
Index: sdp_event.c
===================================================================
--- sdp_event.c (revision 1922)
+++ sdp_event.c (working copy)
@@ -45,7 +45,7 @@ int sdp_cq_event_locked(struct ib_wc *co
{
int result = 0;
- if (0 < (SDP_ST_MASK_ERROR & conn->state)) {
+ if (SDP_ST_MASK_ERROR & conn->state) {
/*
* Ignore events in error state, connection is being
* terminated, connection cleanup will take care of freeing
@@ -102,7 +102,7 @@ int sdp_cq_event_locked(struct ib_wc *co
/*
* release socket before error processing.
*/
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "ABORT on error <%d> event <%u:%llu:%u:%u>",
result,
comp->status,
@@ -115,7 +115,7 @@ int sdp_cq_event_locked(struct ib_wc *co
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ERROR_CQ);
result = sdp_wall_abort(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> during abort", result);
return -EFAULT;
@@ -140,7 +140,7 @@ void sdp_cq_event_handler(struct ib_cq *
* get socket
*/
conn = sdp_conn_table_lookup(hashent);
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(conn, "Unknown connection <%d> for cq event",
hashent);
goto done;
@@ -157,14 +157,14 @@ void sdp_cq_event_handler(struct ib_cq *
* has been made, the act of unlocking the connection will
* drain the CQ.
*/
- if (0 == (SDP_ST_MASK_EVENTS & conn->state)) {
+ if (!(SDP_ST_MASK_EVENTS & conn->state)) {
/*
* passive and active connect respectively
*/
- if (SDP_CONN_ST_REP_SENT == conn->state ||
- SDP_CONN_ST_RTU_SENT == conn->state) {
+ if (conn->state == SDP_CONN_ST_REP_SENT ||
+ conn->state == SDP_CONN_ST_RTU_SENT) {
result = ib_cm_establish(conn->cm_id);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
else
sdp_dbg_warn(conn, "Unexpected locked state.");
@@ -173,7 +173,7 @@ void sdp_cq_event_handler(struct ib_cq *
goto unlock;
}
- if (0 == conn->lock.users)
+ if (!conn->lock.users)
/*
* dispatch CQ completions.
*/
@@ -208,7 +208,7 @@ static int _sdp_cm_idle(struct ib_cm_id
int result = 0;
int expect;
- if (NULL == conn)
+ if (!conn)
return -EINVAL;
/*
* IDLE should only be called after some other action on the comm_id,
@@ -231,7 +231,7 @@ static int _sdp_cm_idle(struct ib_cm_id
break;
case SDP_CONN_ST_REP_SENT: /* passive open, Hello ack msg sent */
result = sdp_wall_recv_failed(conn, ECONNREFUSED);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> receiving CM failed",
result);
@@ -260,7 +260,7 @@ static int _sdp_cm_idle(struct ib_cm_id
* Connection is finally dead. Drop the CM reference
*/
result = sdp_wall_recv_drop(conn);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
break;
default:
@@ -278,7 +278,7 @@ error:
* last attempt to drop the CM reference.
*/
expect = sdp_wall_recv_drop(conn);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
return result;
}
@@ -294,7 +294,7 @@ static int _sdp_cm_established(struct ib
int expect;
struct sdpc_buff *buff;
- if (NULL == conn)
+ if (!conn)
return -EINVAL;
sdp_dbg_ctrl(conn, "CM ESTABLISHED. commID <%08x>", cm_id->local_id);
@@ -308,7 +308,7 @@ static int _sdp_cm_established(struct ib
switch (conn->state) {
case SDP_CONN_ST_REP_SENT: /* passive open, Hello ack msg sent */
buff = sdp_buff_q_get_head(&conn->send_post);
- if (NULL == buff)
+ if (!buff)
sdp_dbg_warn(conn, "hello ack missing in send pool");
else
(void)sdp_buff_pool_put(buff);
@@ -316,7 +316,7 @@ static int _sdp_cm_established(struct ib
SDP_CONN_ST_SET(conn, SDP_CONN_ST_ESTABLISHED);
result = sdp_wall_recv_confirm(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> confirming conn state",
result);
/*
@@ -327,14 +327,14 @@ static int _sdp_cm_established(struct ib
}
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing send queue.",
result);
goto error;
}
result = sdp_recv_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing receives.",
result);
goto error;
@@ -355,7 +355,7 @@ static int _sdp_cm_established(struct ib
/* active open, and active close, confirm */
case SDP_CONN_ST_DIS_PEND_2:
result = sdp_send_flush(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> flushing receives.",
result);
goto error;
@@ -369,7 +369,7 @@ static int _sdp_cm_established(struct ib
* existing state correctly.
*/
result = sdp_cm_disconnect(conn);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(conn, "Error <%d> posting CM disconnect",
result);
@@ -386,7 +386,7 @@ static int _sdp_cm_established(struct ib
return 0;
error:
expect = sdp_wall_recv_drop(conn);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
done:
conn->cm_id = NULL;
return result;
@@ -402,7 +402,7 @@ static int _sdp_cm_timewait(struct ib_cm
int result = 0;
int expect;
- if (NULL == conn)
+ if (!conn)
return -EINVAL;
sdp_dbg_ctrl(conn, "CM TIME WAIT. commID <%08x> event <%d>",
@@ -452,7 +452,7 @@ static int _sdp_cm_timewait(struct ib_cm
SDP_CONN_ST_SET(conn, SDP_CONN_ST_TIME_WAIT_1);
result = sdp_wall_abort(conn);
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> during abort", result);
goto error;
}
@@ -469,7 +469,7 @@ static int _sdp_cm_timewait(struct ib_cm
return 0;
error:
expect = sdp_wall_recv_drop(conn);
- SDP_EXPECT(!(0 > expect));
+ SDP_EXPECT(expect >= 0);
conn->cm_id = NULL;
return result;
@@ -495,7 +495,7 @@ int sdp_cm_event_handler(struct ib_cm_id
* lookup the connection, on a REQ_RECV the sk will be empty.
*/
conn = sdp_conn_table_lookup(hashent);
- if (NULL != conn)
+ if (conn)
sdp_conn_lock(conn);
else
if (IB_CM_REQ_RCVD != cm_id->state)
@@ -527,8 +527,8 @@ int sdp_cm_event_handler(struct ib_cm_id
/*
* if a socket was found, release the lock, and put the reference.
*/
- if (NULL != conn) {
- if (0 > result) {
+ if (conn) {
+ if (result < 0) {
sdp_dbg_warn(conn,
"CM state <%d> event <%d> error <%d>",
cm_id->state, event->event, result);
Index: sdp_buff.c
===================================================================
--- sdp_buff.c (revision 1922)
+++ sdp_buff.c (working copy)
@@ -51,7 +51,7 @@ static inline struct sdpc_buff *_sdp_buf
{
struct sdpc_buff *buff;
- if (NULL == pool->head)
+ if (!pool->head)
return NULL;
if (fifo)
@@ -59,7 +59,7 @@ static inline struct sdpc_buff *_sdp_buf
else
buff = pool->head->prev;
- if (NULL == test_func || 0 == test_func(buff, usr_arg)) {
+ if (!test_func || !test_func(buff, usr_arg)) {
if (buff->next == buff && buff->prev == buff)
pool->head = NULL;
else {
@@ -89,10 +89,10 @@ static inline int _sdp_buff_q_put(struct
int fifo)
{
/* fifo: false == tail, true == head */
- if (NULL != buff->pool)
+ if (buff->pool)
return -EINVAL;
- if (NULL == pool->head) {
+ if (!pool->head) {
buff->next = buff;
buff->prev = buff;
pool->head = buff;
@@ -119,7 +119,7 @@ static inline int _sdp_buff_q_put(struct
static inline struct sdpc_buff *_sdp_buff_q_look(struct sdpc_buff_q *pool,
int fifo)
{
- if (NULL == pool->head || fifo)
+ if (!pool->head || fifo)
return pool->head;
else
return pool->head->prev;
@@ -236,21 +236,21 @@ struct sdpc_buff *sdp_buff_q_fetch(struc
/*
* check to see if there is anything to traverse.
*/
- if (NULL != pool->head)
+ if (pool->head)
/*
* lock to prevent corruption of table
*/
for (counter = 0, buff = pool->head;
counter < pool->size; counter++, buff = buff->next) {
result = test(buff, usr_arg);
- if (0 < result) {
+ if (result > 0) {
result = _sdp_buff_q_remove(pool, buff);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
return buff;
}
- if (0 > result)
+ if (result < 0)
break;
}
@@ -272,7 +272,7 @@ int sdp_buff_q_trav_head(struct sdpc_buf
/*
* check to see if there is anything to traverse.
*/
- if (NULL != pool->head)
+ if (pool->head)
/*
* lock to prevent corruption of table
*/
@@ -280,7 +280,7 @@ int sdp_buff_q_trav_head(struct sdpc_buf
counter < pool->size; counter++, buff = buff->next) {
result = trav_func(buff, usr_arg);
- if (0 > result)
+ if (result < 0)
break;
}
@@ -324,13 +324,13 @@ void sdp_buff_q_clear_unmap(struct sdpc_
int result;
struct sdpc_buff *buff;
- while (NULL != (buff = _sdp_buff_q_get(pool, 0, NULL, NULL))) {
+ while ((buff = _sdp_buff_q_get(pool, 0, NULL, NULL))) {
if (dev)
dma_unmap_single(dev, buff->real,
buff->tail - buff->data, direction);
result = sdp_buff_pool_put(buff);
- if (0 > result)
+ if (result < 0)
sdp_dbg_err("Error <%d> returning buffer to main",
result);
}
@@ -352,7 +352,7 @@ static void _sdp_buff_pool_release(struc
*/
while (count--) {
buff = sdp_buff_q_get(&m_pool->pool);
- if (NULL == buff)
+ if (!buff)
break;
/*
* decrement global buffer count, free buffer page, and free
@@ -408,14 +408,14 @@ static int _sdp_buff_pool_alloc(struct s
* the pool.
*/
buff = kmem_cache_alloc(m_pool->buff_cache, GFP_ATOMIC);
- if (NULL == buff) {
+ if (!buff) {
sdp_warn("Failed to allocate buffer. <%d:%d>",
total, m_pool->buff_cur);
break;
}
buff->head = (void *)__get_free_page(GFP_ATOMIC);
- if (NULL == buff->head) {
+ if (!buff->head) {
sdp_warn("Failed to allocate buffer page. <%d:%d>",
total, m_pool->buff_cur);
@@ -434,7 +434,7 @@ static int _sdp_buff_pool_alloc(struct s
buff->release = sdp_buff_pool_put;
result = sdp_buff_q_put(&m_pool->pool, buff);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Failed to queue buffer. <%d>", result);
free_page((unsigned long)buff->head);
@@ -445,7 +445,7 @@ static int _sdp_buff_pool_alloc(struct s
m_pool->buff_cur++;
}
- if (NULL == main_pool->pool.head) {
+ if (!main_pool->pool.head) {
sdp_warn("Failed to allocate any buffers. <%d:%d:%d>",
total, m_pool->buff_cur, m_pool->alloc_inc);
@@ -465,14 +465,14 @@ int sdp_buff_pool_init(int buff_min,
{
int result;
- if (NULL != main_pool) {
+ if (main_pool) {
sdp_warn("Main pool already initialized!");
return -EEXIST;
}
- if (!(0 < buff_min) ||
- !(0 < alloc_inc) ||
- !(0 < free_mark) ||
+ if (buff_min <= 0 ||
+ alloc_inc <= 0 ||
+ free_mark <= 0 ||
buff_max < buff_min) {
sdp_warn("Pool allocation count error. <%d:%d:%d:%d>",
@@ -483,7 +483,7 @@ int sdp_buff_pool_init(int buff_min,
* allocate the main pool structures
*/
main_pool = kmalloc(sizeof(struct sdpc_buff_root), GFP_KERNEL);
- if (NULL == main_pool) {
+ if (!main_pool) {
sdp_warn("Main pool initialization failed.");
result = -ENOMEM;
goto done;
@@ -504,7 +504,7 @@ int sdp_buff_pool_init(int buff_min,
sizeof(struct sdpc_buff_q),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == main_pool->pool_cache) {
+ if (!main_pool->pool_cache) {
sdp_warn("Failed to allocate pool cache.");
result = -ENOMEM;
goto error_pool;
@@ -514,7 +514,7 @@ int sdp_buff_pool_init(int buff_min,
sizeof(struct sdpc_buff),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
- if (NULL == main_pool->buff_cache) {
+ if (!main_pool->buff_cache) {
sdp_warn("Failed to allocate buffer cache.");
result = -ENOMEM;
goto error_buff;
@@ -523,7 +523,7 @@ int sdp_buff_pool_init(int buff_min,
* allocate the minimum number of buffers.
*/
result = _sdp_buff_pool_alloc(main_pool);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> allocating buffers. <%d>",
result, buff_min);
goto error_alloc;
@@ -551,7 +551,7 @@ done:
*/
void sdp_buff_pool_destroy(void)
{
- if (NULL == main_pool) {
+ if (!main_pool) {
sdp_warn("Main pool dosn't exist.");
return;
}
@@ -595,9 +595,9 @@ struct sdpc_buff *sdp_buff_pool_get(void
*/
spin_lock_irqsave(&main_pool->lock, flags);
- if (NULL == main_pool->pool.head) {
+ if (!main_pool->pool.head) {
result = _sdp_buff_pool_alloc(main_pool);
- if (0 > result) {
+ if (result < 0) {
sdp_warn("Error <%d> allocating buffers.", result);
spin_unlock_irqrestore(&main_pool->lock, flags);
return NULL;
@@ -642,10 +642,10 @@ int sdp_buff_pool_put(struct sdpc_buff *
{
unsigned long flags;
- if (NULL == buff || NULL != buff->pool)
+ if (!buff || buff->pool)
return -EINVAL;
- if (NULL != buff->next || NULL != buff->prev)
+ if (buff->next || buff->prev)
return -ETOOMANYREFS;
/*
* reset pointers
@@ -656,7 +656,7 @@ int sdp_buff_pool_put(struct sdpc_buff *
spin_lock_irqsave(&main_pool->lock, flags);
- if (NULL == main_pool->pool.head) {
+ if (!main_pool->pool.head) {
buff->next = buff;
buff->prev = buff;
main_pool->pool.head = buff;
@@ -686,7 +686,7 @@ void sdp_buff_pool_chain_link(struct sdp
buff->tail = buff->head;
buff->pool = &main_pool->pool;
- if (NULL == head) {
+ if (!head) {
buff->next = buff;
buff->prev = buff;
} else {
@@ -712,12 +712,12 @@ int sdp_buff_pool_chain_put(struct sdpc_
* a number of buffers are processed in a loop, before being
* returned. (e.g. send completions, recv to userspace.
*/
- if (NULL == buff || !(0 < count))
+ if (!buff || count <= 0)
return -EINVAL;
spin_lock_irqsave(&main_pool->lock, flags);
- if (NULL == main_pool->pool.head)
+ if (!main_pool->pool.head)
main_pool->pool.head = buff;
else {
prev = buff->prev;
@@ -746,7 +746,7 @@ int sdp_buff_pool_buff_size(void)
{
int result;
- if (NULL == main_pool)
+ if (!main_pool)
result = -1;
else
result = main_pool->buff_size;
@@ -774,7 +774,7 @@ int sdp_proc_dump_buff_pool(char *buffer
*/
spin_lock_irqsave(&main_pool->lock, flags);
- if (0 == start_index) {
+ if (!start_index) {
offset += sprintf((buffer + offset),
" buffer size: %8d\n",
main_pool->buff_size);
Index: sdp_queue.c
===================================================================
--- sdp_queue.c (revision 1922)
+++ sdp_queue.c (working copy)
@@ -46,7 +46,7 @@ static struct sdpc_desc *_sdp_desc_q_get
{
struct sdpc_desc *element;
- if (NULL == table->head)
+ if (!table->head)
return NULL;
if (fifo)
@@ -84,10 +84,10 @@ static inline int _sdp_desc_q_put(struct
/*
* fifo: false == tail, true == head
*/
- if (NULL != element->table)
+ if (element->table)
return -EINVAL;
- if (NULL == table->head) {
+ if (!table->head) {
element->next = element;
element->prev = element;
table->head = element;
@@ -158,7 +158,7 @@ struct sdpc_desc *sdp_desc_q_lookup(stru
for (counter = 0, element = table->head;
counter < table->size; counter++, element = element->next)
- if (0 == lookup(element, arg))
+ if (!lookup(element, arg))
return element;
return NULL;
@@ -211,7 +211,7 @@ struct sdpc_desc *sdp_desc_q_look_head(s
*/
int sdp_desc_q_type_head(struct sdpc_desc_q *table)
{
- if (NULL == table->head)
+ if (!table->head)
return SDP_DESC_TYPE_NONE;
else
return table->head->type;
@@ -223,7 +223,7 @@ int sdp_desc_q_type_head(struct sdpc_des
struct sdpc_desc *sdp_desc_q_look_type_head(struct sdpc_desc_q *table,
enum sdp_desc_type type)
{
- if (NULL == table->head)
+ if (!table->head)
return NULL;
else
return ((type == table->head->type) ? table->head : NULL);
@@ -235,7 +235,7 @@ struct sdpc_desc *sdp_desc_q_look_type_h
struct sdpc_desc *sdp_desc_q_look_type_tail(struct sdpc_desc_q *table,
enum sdp_desc_type type)
{
- if (NULL == table->head)
+ if (!table->head)
return NULL;
else
return ((type == table->head->prev->type) ?
@@ -274,9 +274,9 @@ void sdp_desc_q_clear(struct sdpc_desc_q
/*
* drain the table of any objects
*/
- while (NULL != (element = sdp_desc_q_get_head(table)))
- if (NULL != element->release) {
+ while ((element = sdp_desc_q_get_head(table)))
+ if (element->release) {
result = element->release(element);
- SDP_EXPECT(!(0 > result));
+ SDP_EXPECT(result >= 0);
}
}
Index: sdp_post.c
===================================================================
--- sdp_post.c (revision 1922)
+++ sdp_post.c (working copy)
@@ -41,12 +41,12 @@ int sdp_cm_listen_start(struct sdev_root
{
int result = 0;
- if (NULL != dev_root->listen_id)
+ if (dev_root->listen_id)
sdp_dbg_warn(NULL, "Already listening for connections.");
dev_root->listen_id = ib_create_cm_id(sdp_cm_event_handler,
(void *)SDP_DEV_SK_INVALID);
- if (NULL == dev_root->listen_id)
+ if (!dev_root->listen_id)
return -ENOMEM;
/*
* start listening
@@ -54,7 +54,7 @@ int sdp_cm_listen_start(struct sdev_root
result = ib_cm_listen(dev_root->listen_id,
cpu_to_be64(SDP_MSG_SERVICE_ID_VALUE),
cpu_to_be64(SDP_MSG_SERVICE_ID_MASK));
- if (0 > result) {
+ if (result < 0) {
sdp_dbg_warn(NULL, "Error <%d> listening for SDP connections",
result);
@@ -74,7 +74,7 @@ int sdp_cm_listen_stop(struct sdev_root
int result = 0;
result = ib_destroy_cm_id(dev_root->listen_id);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(NULL, "Error <%d> stopping listen", result);
else
sdp_dbg_init("Stopped listening for SDP connections");
@@ -92,7 +92,7 @@ static void _sdp_cm_disconnect(void *arg
struct sdp_opt *conn = (struct sdp_opt *)arg;
int result;
- if (NULL == conn) {
+ if (!conn) {
sdp_dbg_warn(NULL, "Error, posting disconnect for NULL conn");
return;
}
@@ -102,7 +102,7 @@ static void _sdp_cm_disconnect(void *arg
* send a disconnect request using the connection manager
*/
result = ib_send_cm_dreq(conn->cm_id, NULL, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(NULL, "Error <%d> CM disconnect request", result);
sdp_conn_put(conn);
@@ -116,7 +116,7 @@ static void _sdp_cm_reject(void *arg)
struct sdp_opt *conn = (struct sdp_opt *)arg;
int result;
- if (NULL == conn)
+ if (!conn)
sdp_dbg_warn(NULL, "Error, posting reject for NULL conn");
sdp_dbg_ctrl(conn, "Defered reject <%08x>", conn->cm_id->local_id);
@@ -126,7 +126,7 @@ static void _sdp_cm_reject(void *arg)
result = ib_send_cm_rej(conn->cm_id,
IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
- if (0 > result)
+ if (result < 0)
sdp_dbg_warn(NULL, "Error <%d> CM reject request", result);
sdp_conn_put(conn);
--
MST - Michael S. Tsirkin