[openib-general] [PATCH applied] sdp: kill all trailing whitespace
Michael S. Tsirkin
mst at mellanox.co.il
Tue Aug 9 05:34:03 PDT 2005
The following was applied in rev 3033.
---
Kill all trailing whitespace in SDP. There's no reason to keep it around.
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
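Note: the change is purely mechanical; each hunk below only drops the spaces and tabs
sitting in front of a newline. Purely as an illustration (this small filter is not part
of the patch or of the kernel tree), the per-line transformation amounts to:

#include <stdio.h>
#include <string.h>

/* Strip trailing spaces and tabs from one line, keeping the newline. */
static void strip_trailing_ws(char *line)
{
	size_t len = strlen(line);
	int had_newline = 0;

	if (len && line[len - 1] == '\n') {
		had_newline = 1;
		len--;
	}
	while (len && (line[len - 1] == ' ' || line[len - 1] == '\t'))
		len--;
	line[len] = '\0';
	if (had_newline)
		strcat(line, "\n");
}

int main(void)
{
	char buf[4096];

	/* stdin/stdout filter, e.g.: ./strip_ws < sdp_link.c > sdp_link.c.new */
	while (fgets(buf, sizeof(buf), stdin)) {
		strip_trailing_ws(buf);
		fputs(buf, stdout);
	}
	return 0;
}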
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_write.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_write.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_write.c (working copy)
@@ -116,12 +116,12 @@ int sdp_event_write(struct sdp_sock *con
iocb = (struct sdpc_iocb *)sdp_desc_q_look_type_head(&conn->send_queue,
SDP_DESC_TYPE_IOCB);
if (!iocb) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"No IOCB on write complete <%llu:%d:%d>",
(unsigned long long)comp->wr_id,
sdp_desc_q_size(&conn->w_snk),
sdp_desc_q_size(&conn->send_queue));
-
+
result = -EPROTO;
goto error;
}
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_link.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_link.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_link.c (working copy)
@@ -82,12 +82,12 @@ static void sdp_link_path_complete(u64 i
/*
* call completion function
*/
- func(id,
+ func(id,
status,
info->dst,
info->src,
info->port,
- info->ca,
+ info->ca,
&info->path,
arg);
@@ -136,7 +136,7 @@ static void sdp_path_wait_destroy(struct
static void sdp_path_wait_complete(struct sdp_path_wait *wait,
struct sdp_path_info *info, int status)
{
- sdp_link_path_complete(wait->id,
+ sdp_link_path_complete(wait->id,
status,
info,
wait->completion,
@@ -267,7 +267,7 @@ static void sdp_link_path_rec_done(int s
IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY |
IB_SA_PATH_REC_NUMB_PATH),
- info->sa_time,
+ info->sa_time,
GFP_KERNEL,
sdp_link_path_rec_done,
info,
@@ -560,8 +560,8 @@ int sdp_link_path_lookup(u32 dst_addr,
sdp_dbg_warn(NULL, "Failed to create path object");
return -ENOMEM;
}
-
- info->src = src_addr; /* source is used in lookup and
+
+ info->src = src_addr; /* source is used in lookup and
populated by routing lookup */
}
/*
@@ -609,13 +609,13 @@ static void sdp_link_sweep(void *data)
struct sdp_path_info *info;
struct sdp_path_info *sweep;
- sweep = info_list;
+ sweep = info_list;
while (sweep) {
info = sweep;
sweep = sweep->next;
if (jiffies > (info->use + SDP_LINK_INFO_TIMEOUT)) {
- sdp_dbg_ctrl(NULL,
+ sdp_dbg_ctrl(NULL,
"info delete <%d.%d.%d.%d> <%lu:%lu>",
(info->dst & 0x000000ff),
(info->dst & 0x0000ff00) >> 8,
@@ -762,7 +762,7 @@ int sdp_link_addr_init(void)
result = -ENOMEM;
goto error_wq;
}
-
+
INIT_WORK(&link_timer, sdp_link_sweep, NULL);
queue_delayed_work(link_wq, &link_timer, SDP_LINK_SWEEP_INTERVAL);
/*
@@ -771,7 +771,7 @@ int sdp_link_addr_init(void)
* completed.
*/
dev_add_pack(&sdp_arp_type);
-
+
return 0;
error_wq:
kmem_cache_destroy(wait_cache);
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_rcvd.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_rcvd.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_rcvd.c (working copy)
@@ -63,7 +63,7 @@ static int sdp_rcvd_disconnect(struct sd
*/
result = ib_send_cm_dreq(conn->cm_id, NULL, 0);
/*
- * if the remote DREQ was already received, but unprocessed,
+ * if the remote DREQ was already received, but unprocessed,
* do not treat it as an error
*/
if (result) {
@@ -135,7 +135,7 @@ static int sdp_rcvd_send_sm(struct sdp_s
* using buffered mode
* 2) Conn is in source cancel, and this message acks the cancel.
* Release all active IOCBs in the source queue.
- * 3) Conn is in source cancel, but this message doesn't ack the
+ * 3) Conn is in source cancel, but this message doesn't ack the
* cancel.
*
* Do nothing, can't send since the IOCB is being cancelled, but
@@ -355,7 +355,7 @@ static int sdp_rcvd_mode_change(struct s
/* if */
/*
* drop all srcAvail message, they will be reissued, with
- * combined mode constraints. No snkAvails outstanding on
+ * combined mode constraints. No snkAvails outstanding on
* this half of the connection. How do I know which srcAvail
* RDMA's completed?
*/
@@ -422,7 +422,7 @@ static int sdp_rcvd_src_cancel(struct sd
} else {
result = sdp_send_ctrl_rdma_rd(conn, advt->post);
if (result < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> read completion",
result);
goto done;
@@ -668,7 +668,7 @@ static int sdp_rcvd_snk_avail(struct sdp
goto consume;
}
/*
- * If there are outstanding SrcAvail messages, they are now
+ * If there are outstanding SrcAvail messages, they are now
* invalid and the queue needs to be fixed up.
*/
if (conn->src_sent > 0) {
@@ -690,7 +690,7 @@ static int sdp_rcvd_snk_avail(struct sdp
sdp_iocb_complete(iocb, 0);
}
/*
- * If Source Cancel was in process, it should now
+ * If Source Cancel was in process, it should now
* be cleared.
*/
if (conn->flags & SDP_CONN_F_SRC_CANCEL_L) {
@@ -713,7 +713,7 @@ static int sdp_rcvd_snk_avail(struct sdp
advt->rkey = snkah->r_key;
conn->snk_recv++;
-
+
conn->s_cur_adv = 1;
conn->s_par_adv = 0;
@@ -740,7 +740,7 @@ consume:
result);
} else
result = 0;
-
+
/*
* PostRecv will take care of consuming this advertisment, based
* on result.
@@ -768,7 +768,7 @@ static int sdp_rcvd_src_avail(struct sdp
if (conn->snk_sent > 0) {
/*
- * crossed SrcAvail and SnkAvail, the source message is
+ * crossed SrcAvail and SnkAvail, the source message is
* discarded.
*/
sdp_dbg_data(conn, "avail cross<%d> dropping src. mode <%d>",
@@ -811,12 +811,12 @@ static int sdp_rcvd_src_avail(struct sdp
goto done;
}
/*
- * consume the advertisment, if it's allowed, first check the recv
+ * consume the advertisment, if it's allowed, first check the recv
* path mode to determine if all is cool for the advertisment.
*/
switch (conn->recv_mode) {
case SDP_MODE_BUFF:
- sdp_dbg_warn(conn, "SrcAvail in bad mode. <%d>",
+ sdp_dbg_warn(conn, "SrcAvail in bad mode. <%d>",
conn->recv_mode);
result = -EPROTO;
goto advt_error;
@@ -826,7 +826,7 @@ static int sdp_rcvd_src_avail(struct sdp
if (conn->src_recv > 0 ||
size <= 0 ||
!(srcah->size > size)) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"SrcAvail mode <%d> mismatch. <%d:%d:%d>",
conn->recv_mode, conn->src_recv,
size, srcah->size);
@@ -955,7 +955,7 @@ static int sdp_rcvd_data(struct sdp_sock
* me can dispose of the buffer.
*/
conn->byte_strm += ret_val;
-
+
return ret_val;
}
@@ -1075,7 +1075,7 @@ int sdp_event_recv(struct sdp_sock *conn
}
dma_unmap_single(conn->ca->dma_device,
- buff->sge.addr,
+ buff->sge.addr,
buff->tail - buff->data,
PCI_DMA_FROMDEVICE);
@@ -1090,7 +1090,7 @@ int sdp_event_recv(struct sdp_sock *conn
sdp_msg_net_to_cpu_bsdh(buff->bsdh_hdr);
if (comp->byte_len != buff->bsdh_hdr->size) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"receive event, message size mismatch <%d:%d>",
comp->byte_len, buff->bsdh_hdr->size);
@@ -1101,7 +1101,7 @@ int sdp_event_recv(struct sdp_sock *conn
buff->tail = buff->data + buff->bsdh_hdr->size;
buff->data = buff->data + sizeof(struct msg_hdr_bsdh);
/*
- * Do not update the advertised sequence number, until the
+ * Do not update the advertised sequence number, until the
* SrcAvailCancel message has been processed.
*/
conn->recv_seq = buff->bsdh_hdr->seq_num;
@@ -1122,7 +1122,7 @@ int sdp_event_recv(struct sdp_sock *conn
buff->bsdh_hdr->flags,
buff->bsdh_hdr->mid,
buff->bsdh_hdr->size,
- buff->bsdh_hdr->seq_num,
+ buff->bsdh_hdr->seq_num,
buff->bsdh_hdr->seq_ack);
/*
* fast path data messages
@@ -1163,7 +1163,7 @@ int sdp_event_recv(struct sdp_sock *conn
}
} else
if (result < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"receive event, dispatch error. <%d>",
result);
@@ -1178,7 +1178,7 @@ int sdp_event_recv(struct sdp_sock *conn
sk->sk_data_ready(sk, conn->byte_strm);
}
/*
- * It's possible that a new recv buffer advertisment opened up the
+ * It's possible that a new recv buffer advertisment opened up the
* recv window and we can flush buffered send data
*/
result = sdp_send_flush(conn);
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_inet.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_inet.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_inet.c (working copy)
@@ -184,7 +184,7 @@ static int sdp_inet_disconnect(struct sd
result = sdp_send_ctrl_disconnect(conn);
if (result < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> send disconnect request",
result);
goto error;
@@ -303,7 +303,7 @@ static int sdp_inet_release(struct socke
if (result < 0)
goto done;
/*
- * Skip lingering/canceling if
+ * Skip lingering/canceling if
* non-blocking and not exiting.
*/
if (!(flags & MSG_DONTWAIT) ||
@@ -316,7 +316,7 @@ static int sdp_inet_release(struct socke
&& !(PF_EXITING & current->flags)) {
DECLARE_WAITQUEUE(wait, current);
timeout = sk->sk_lingertime;
-
+
add_wait_queue(sk->sk_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -789,7 +789,7 @@ static int sdp_inet_accept(struct socket
listen_done:
sdp_conn_unlock(listen_conn);
- sdp_dbg_ctrl(listen_conn,
+ sdp_dbg_ctrl(listen_conn,
"ACCEPT: complete <%d> <%08x:%04x><%08x:%04x>",
(accept_conn ? accept_conn->hashent : SDP_DEV_SK_INVALID),
(accept_sk ? accept_conn->src_addr : 0),
@@ -814,7 +814,7 @@ static int sdp_inet_getname(struct socke
conn = sdp_sk(sk);
sdp_dbg_ctrl(conn, "GETNAME: src <%08x:%04x> dst <%08x:%04x>",
- conn->src_addr, conn->src_port,
+ conn->src_addr, conn->src_port,
conn->dst_addr, conn->dst_port);
addr->sin_family = proto_family;
@@ -889,8 +889,8 @@ static unsigned int sdp_inet_poll(struct
mask |= POLLIN | POLLRDNORM;
/*
- * send EOF _or_ send data space.
- * (Some poll() Linux documentation says that POLLHUP is
+ * send EOF _or_ send data space.
+ * (Some poll() Linux documentation says that POLLHUP is
* incompatible with the POLLOUT/POLLWR flags)
*/
if (SEND_SHUTDOWN & conn->shutdown)
@@ -898,7 +898,7 @@ static unsigned int sdp_inet_poll(struct
else {
/*
* avoid race by setting flags, and only clearing
- * them if the test is passed. Setting after the
+ * them if the test is passed. Setting after the
* test, we can end up with them set and a passing
* test.
*/
@@ -917,10 +917,10 @@ static unsigned int sdp_inet_poll(struct
mask |= POLLPRI;
}
- sdp_dbg_data(conn, "POLL: mask <%08x> flags <%08lx> <%d:%d:%d>",
+ sdp_dbg_data(conn, "POLL: mask <%08x> flags <%08lx> <%d:%d:%d>",
mask, sock->flags, conn->send_buf, conn->send_qud,
sdp_inet_writable(conn));
-
+
return mask;
}
@@ -1051,7 +1051,7 @@ static int sdp_inet_ioctl(struct socket
/*
* sdp_inet_setopt - set a socket option
*/
-static int sdp_inet_setopt(struct socket *sock, int level, int optname,
+static int sdp_inet_setopt(struct socket *sock, int level, int optname,
char __user *optval, int optlen)
{
struct sock *sk;
@@ -1062,7 +1062,7 @@ static int sdp_inet_setopt(struct socket
sk = sock->sk;
conn = sdp_sk(sk);
- sdp_dbg_ctrl(conn, "SETSOCKOPT: level <%d> option <%d>",
+ sdp_dbg_ctrl(conn, "SETSOCKOPT: level <%d> option <%d>",
level, optname);
if (SOL_TCP != level && SOL_SDP != level)
@@ -1272,7 +1272,7 @@ static int sdp_inet_create(struct socket
sdp_dbg_ctrl(NULL, "SOCKET: type <%d> proto <%d> state <%u:%08lx>",
sock->type, protocol, sock->state, sock->flags);
-
+
if (SOCK_STREAM != sock->type ||
(IPPROTO_IP != protocol && IPPROTO_TCP != protocol)) {
sdp_dbg_warn(NULL, "SOCKET: unsupported type/proto. <%d:%d>",
@@ -1366,9 +1366,9 @@ static int __init sdp_init(void)
/*
* buffer memory
*/
- result = sdp_buff_pool_init(buff_min,
- buff_max,
- alloc_inc,
+ result = sdp_buff_pool_init(buff_min,
+ buff_max,
+ alloc_inc,
free_mark);
if (result < 0) {
sdp_warn("Error <%d> initializing buffer pool.", result);
@@ -1378,7 +1378,7 @@ static int __init sdp_init(void)
* connection table
*/
result = sdp_conn_table_init(proto_family,
- conn_size,
+ conn_size,
recv_post_max,
recv_buff_max,
send_post_max,
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_proto.h
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_proto.h (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_proto.h (working copy)
@@ -93,7 +93,7 @@ struct sdpc_buff *sdp_buff_q_fetch(struc
void *arg),
void *usr_arg);
-int sdp_buff_pool_init(int buff_min,
+int sdp_buff_pool_init(int buff_min,
int buff_max,
int alloc_inc,
int free_mark);
@@ -263,7 +263,7 @@ struct sdp_sock *sdp_conn_table_lookup(s
struct sdp_sock *sdp_conn_alloc(unsigned int priority);
int sdp_conn_alloc_ib(struct sdp_sock *conn,
- struct ib_device *device,
+ struct ib_device *device,
u8 hw_port,
u16 pkey);
@@ -325,7 +325,7 @@ int sdp_send_ctrl_abort(struct sdp_sock
int sdp_send_ctrl_send_sm(struct sdp_sock *conn);
int sdp_send_ctrl_snk_avail(struct sdp_sock *conn,
- u32 size,
+ u32 size,
u32 rkey,
u64 addr);
@@ -369,14 +369,14 @@ int sdp_event_write(struct sdp_sock *con
* DATA transport
*/
int sdp_inet_send(struct kiocb *iocb,
- struct socket *sock,
+ struct socket *sock,
struct msghdr *msg,
size_t size);
int sdp_inet_recv(struct kiocb *iocb,
struct socket *sock,
- struct msghdr *msg,
- size_t size,
+ struct msghdr *msg,
+ size_t size,
int flags);
void sdp_iocb_q_cancel_all_read(struct sdp_sock *conn, ssize_t error);
@@ -413,7 +413,7 @@ void sdp_link_addr_cleanup(void);
/*
* Event handling function, demultiplexed base on Message ID
*/
-typedef int (*sdp_event_cb_func)(struct sdp_sock *conn,
+typedef int (*sdp_event_cb_func)(struct sdp_sock *conn,
struct sdpc_buff *buff);
/*
@@ -562,7 +562,7 @@ static inline void sdp_conn_stat_dump(st
#ifdef _SDP_CONN_STATS_REC
int counter;
- sdp_dbg_init("STAT: src <%u> snk <%u>",
+ sdp_dbg_init("STAT: src <%u> snk <%u>",
conn->src_serv, conn->snk_serv);
for (counter = 0; counter < 0x20; counter++)
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_send.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_send.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_send.c (working copy)
@@ -82,7 +82,7 @@ static int sdp_send_buff_post(struct sdp
* the flag. This allows for at least one pending urgent message
* to send early notification.
*/
- if ((conn->flags & SDP_CONN_F_OOB_SEND) &&
+ if ((conn->flags & SDP_CONN_F_OOB_SEND) &&
conn->oob_offset <= 0xFFFF) {
SDP_BSDH_SET_OOB_PEND(buff->bsdh_hdr);
SDP_BUFF_F_SET_SE(buff);
@@ -138,7 +138,7 @@ static int sdp_send_buff_post(struct sdp
result = ib_post_send(conn->qp, &send_param, &bad_wr);
if (result) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> posting send. <%d:%d> <%d:%d:%d>",
result, conn->s_wq_cur, conn->s_wq_size,
sdp_buff_q_size(&conn->send_post),
@@ -625,7 +625,7 @@ static int sdp_send_data_iocb_src(struct
if (len > iocb->len) {
sdp_dbg_warn(conn, "Data <%d:%d:%d> from IOCB <%d:%d>",
len, pos, off,
- iocb->page_count,
+ iocb->page_count,
iocb->page_offset);
result = -EFAULT;
@@ -633,7 +633,7 @@ static int sdp_send_data_iocb_src(struct
}
local_irq_save(flags);
-
+
addr = kmap_atomic(iocb->page_array[pos], KM_IRQ0);
if (!addr) {
result = -ENOMEM;
@@ -711,7 +711,7 @@ static int sdp_send_iocb_buff_write(stru
local_irq_restore(flags);
break;
}
-
+
copy = min((PAGE_SIZE - offset),
(unsigned long)(buff->end - buff->tail));
copy = min((unsigned long)iocb->len, copy);
@@ -838,7 +838,7 @@ static int sdp_send_data_iocb(struct sdp
}
/*
* If there are active sink IOCBs we want to stall, in the
- * hope that a new sink advertisment will arrive, because
+ * hope that a new sink advertisment will arrive, because
* sinks are more efficient.
*/
if (sdp_desc_q_size(&conn->w_snk) ||
@@ -925,7 +925,7 @@ static int sdp_send_data_queue_flush(str
* (positive: no space; negative: error)
*/
while ((element = sdp_desc_q_look_head(&conn->send_queue))) {
-
+
result = sdp_send_data_queue_test(conn, element);
if (result)
break;
@@ -964,7 +964,7 @@ static int sdp_send_data_queue(struct sd
result = sdp_send_ctrl_mode_ch(conn,
SDP_MSG_MCH_PIPE_RECV);
if (result < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> posting mode change",
result);
goto done;
@@ -1096,14 +1096,14 @@ static int sdp_send_ctrl_buff_flush(stru
{
struct sdpc_desc *element;
int result = 0;
-
+
/*
* As long as there are buffers, try to post until a non-zero
* result is generated. (positive: no space; negative: error)
*/
while ((element = sdp_desc_q_look_head(&conn->send_ctrl))) {
- result = sdp_send_ctrl_buff_test(conn,
+ result = sdp_send_ctrl_buff_test(conn,
(struct sdpc_buff *)element);
if (result)
break;
@@ -1259,7 +1259,7 @@ int sdp_send_ctrl_disconnect(struct sdp_
*/
if ((conn->flags & SDP_CONN_F_DIS_HOLD) ||
sdp_desc_q_size(&conn->send_queue) ||
- conn->src_sent)
+ conn->src_sent)
conn->flags |= SDP_CONN_F_DIS_PEND;
else
result = do_send_ctrl_disconnect(conn);
@@ -1685,7 +1685,7 @@ static int sdp_inet_write_cancel(struct
sdp_dbg_ctrl(NULL, "Cancel Write IOCB user <%d> key <%d> flag <%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
-
+
if (!si || !si->sock || !si->sock->sk) {
sdp_warn("Cancel empty write IOCB users <%d> flags <%d:%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
@@ -1714,7 +1714,7 @@ static int sdp_inet_write_cancel(struct
* If active, then place it into the correct active queue
*/
sdp_desc_q_remove((struct sdpc_desc *)iocb);
-
+
if (iocb->flags & SDP_IOCB_F_ACTIVE) {
if (iocb->flags & SDP_IOCB_F_RDMA_W)
sdp_desc_q_put_tail(&conn->w_snk,
@@ -1936,7 +1936,7 @@ int sdp_inet_send(struct kiocb *req, str
sdp_dbg_data(conn, "write IOCB <%d> addr <%p> user <%d> flag <%08lx>",
req->ki_key, msg->msg_iov->iov_base,
req->ki_users, req->ki_flags);
-
+
sdp_conn_lock(conn);
/*
* ESTABLISED and CLOSE can send, while CONNECT and ACCEPTED can
@@ -1983,7 +1983,7 @@ int sdp_inet_send(struct kiocb *req, str
copy = min(copy, sdp_inet_write_space(conn, oob));
#ifndef _SDP_DATA_PATH_NULL
- result = memcpy_fromiovec(buff->tail,
+ result = memcpy_fromiovec(buff->tail,
msg->msg_iov,
copy);
if (result < 0) {
@@ -2095,20 +2095,20 @@ skip: /* entry point for IOCB based tran
iocb->req = req;
iocb->key = req->ki_key;
iocb->addr = (unsigned long)msg->msg_iov->iov_base - copied;
-
+
req->ki_cancel = sdp_inet_write_cancel;
result = sdp_iocb_lock(iocb);
if (result < 0) {
- sdp_dbg_warn(conn, "Error <%d> locking IOCB <%Zu:%d>",
+ sdp_dbg_warn(conn, "Error <%d> locking IOCB <%Zu:%d>",
result, size, copied);
-
+
sdp_iocb_destroy(iocb);
break;
}
SDP_CONN_STAT_WQ_INC(conn, iocb->size);
-
+
conn->send_pipe += iocb->len;
result = sdp_send_data_queue(conn, (struct sdpc_desc *)iocb);
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.c (working copy)
@@ -106,7 +106,7 @@ void sdp_conn_abort(struct sdp_sock *con
int error = -ECONNRESET;
sdp_dbg_ctrl(conn, "Abort send. src <%08x:%04x> dst <%08x:%04x>",
- conn->src_addr, conn->src_port,
+ conn->src_addr, conn->src_port,
conn->dst_addr, conn->dst_port);
switch (conn->state) {
@@ -121,7 +121,7 @@ void sdp_conn_abort(struct sdp_sock *con
case SDP_CONN_ST_DIS_SEND_2:
case SDP_CONN_ST_DIS_SEND_1:
/*
- * don't touch control queue, diconnect message may
+ * don't touch control queue, diconnect message may
* still be queued.
*/
sdp_desc_q_clear(&conn->send_queue);
@@ -423,13 +423,13 @@ int sdp_inet_port_get(struct sdp_sock *c
INADDR_ANY == look->src_addr ||
conn->src_addr == look->src_addr) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"port rejected. <%04x><%d:%d><%d:%d><%04x><%u:%u>",
port,
sk->sk_bound_dev_if,
srch->sk_bound_dev_if,
sk->sk_reuse,
- srch->sk_reuse,
+ srch->sk_reuse,
look->state,
conn->src_addr,
look->src_addr);
@@ -619,7 +619,7 @@ done:
return conn;
}
-/*
+/*
* Functions to cancel IOCB requests in a conenctions queues.
*/
static int sdp_desc_q_cancel_lookup_func(struct sdpc_desc *element, void *arg)
@@ -826,7 +826,7 @@ void sdp_conn_relock(struct sdp_sock *co
if (1 == result_r) {
result = sdp_cq_event_locked(&entry, conn);
if (result < 0)
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> from event handler.",
result);
@@ -837,7 +837,7 @@ void sdp_conn_relock(struct sdp_sock *co
if (1 == result_s) {
result = sdp_cq_event_locked(&entry, conn);
if (result < 0)
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> from event handler.",
result);
rearm = 1;
@@ -850,17 +850,17 @@ void sdp_conn_relock(struct sdp_sock *co
result = ib_req_notify_cq(conn->recv_cq,
IB_CQ_NEXT_COMP);
if (result)
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> rearming recv CQ",
result);
result = ib_req_notify_cq(conn->send_cq,
IB_CQ_NEXT_COMP);
if (result)
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> rearming send CQ",
result);
-
+
rearm = 0;
} else
break; /* exit CQ handler routine */
@@ -891,7 +891,7 @@ int sdp_conn_cq_drain(struct ib_cq *cq,
result = ib_poll_cq(cq, 1, &entry);
if (1 == result) {
/*
- * dispatch completion, and mark that the CQ needs
+ * dispatch completion, and mark that the CQ needs
* to be armed.
*/
result = sdp_cq_event_locked(&entry, conn);
@@ -909,7 +909,7 @@ int sdp_conn_cq_drain(struct ib_cq *cq,
if (rearm > 0) {
result = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
if (result)
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> rearming CQ",
result);
rearm = 0;
@@ -988,13 +988,13 @@ int sdp_conn_alloc_ib(struct sdp_sock *c
result = -ENOMEM;
goto error_attr;
}
-
+
init_attr = kmalloc(sizeof(*init_attr), GFP_KERNEL);
if (!init_attr) {
result = -ENOMEM;
goto error_param;
}
-
+
memset(qp_attr, 0, sizeof(*qp_attr));
memset(init_attr, 0, sizeof(*init_attr));
/*
@@ -1308,12 +1308,12 @@ error:
"dst address:port src address:port ID comm_id pid " \
" dst guid src guid dlid slid dqpn " \
"sqpn data sent buff'd data rcvd_buff'd " \
- " data written data read src_serv snk_serv\n"
+ " data written data read src_serv snk_serv\n"
#define SDP_PROC_CONN_MAIN_SEP \
"---------------- ---------------- ---- -------- ---- " \
"---------------- ---------------- ---- ---- ------ " \
"------ ---------------- ---------------- " \
- "---------------- ---------------- -------- --------\n"
+ "---------------- ---------------- -------- --------\n"
#define SDP_PROC_CONN_MAIN_FORM \
"%02x.%02x.%02x.%02x:%04x %02x.%02x.%02x.%02x:%04x " \
"%04x %08x %04x %08x%08x %08x%08x %04x %04x " \
@@ -1322,7 +1322,7 @@ error:
/*
* sdp_proc_dump_conn_main - dump the connection table to /proc
*/
-int sdp_proc_dump_conn_main(char *buffer, int max_size, off_t start_index,
+int sdp_proc_dump_conn_main(char *buffer, int max_size, off_t start_index,
long *end_index)
{
struct sdp_sock *conn;
@@ -1352,7 +1352,7 @@ int sdp_proc_dump_conn_main(char *buffer
/*
* loop across connections.
*/
- for (counter = start_index;
+ for (counter = start_index;
counter < dev_root_s.sk_size &&
!(SDP_CONN_PROC_MAIN_SIZE > (max_size - offset));
counter++) {
@@ -1374,7 +1374,7 @@ int sdp_proc_dump_conn_main(char *buffer
((conn->src_addr >> 8) & 0xff),
((conn->src_addr >> 16) & 0xff),
((conn->src_addr >> 24) & 0xff),
- conn->src_port,
+ conn->src_port,
conn->hashent,
conn->cm_id ? conn->cm_id->local_id : 0,
conn->pid,
@@ -1771,7 +1771,7 @@ static void sdp_device_init_one(struct i
/*
* port allocation
*/
- for (port_count = 0;
+ for (port_count = 0;
port_count < device->phys_port_cnt;
port_count++) {
port = kmalloc(sizeof *port, GFP_KERNEL);
@@ -1788,8 +1788,8 @@ static void sdp_device_init_one(struct i
port->index = port_count + 1;
list_add(&port->list, &hca->port_list);
- result = ib_query_gid(hca->ca,
- port->index,
+ result = ib_query_gid(hca->ca,
+ port->index,
0, /* index */
&port->gid);
if (result) {
@@ -1836,7 +1836,7 @@ static void sdp_device_remove_one(struct
sdp_warn("Device <%s> has no HCA info.", device->name);
return;
}
-
+
list_for_each_entry_safe(port, tmp, &hca->port_list, list) {
list_del(&port->list);
kfree(port);
@@ -1890,7 +1890,7 @@ int sdp_conn_table_init(int proto_family
dev_root_s.recv_buff_max = recv_buff_max;
dev_root_s.send_post_max = send_post_max;
dev_root_s.send_buff_max = send_buff_max;
-
+
dev_root_s.send_usig_max = send_usig_max;
/*
* Get HCA/port list
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_actv.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_actv.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_actv.c (working copy)
@@ -252,7 +252,7 @@ static int sdp_cm_hello_ack_check(struct
hello_ack->bsdh.seq_ack);
sdp_dbg_ctrl(NULL, "Hello Ack HAH <%02x:%02x:%08x>",
hello_ack->hah.max_adv,
- hello_ack->hah.version,
+ hello_ack->hah.version,
hello_ack->hah.l_rcv_size);
return 0; /* success */
@@ -354,7 +354,7 @@ static void sdp_cm_path_complete(u64 id,
*/
if (id != conn->plid) {
sdp_dbg_warn(conn, "Path record ID mismatch <%016llx:%016llx>",
- (unsigned long long)id,
+ (unsigned long long)id,
(unsigned long long)conn->plid);
goto done;
}
@@ -530,7 +530,7 @@ int sdp_cm_connect(struct sdp_sock *conn
*/
sdp_conn_hold(conn); /* address resolution reference */
sdp_conn_unlock(conn);
-
+
result = sdp_link_path_lookup(htonl(conn->dst_addr),
htonl(conn->src_addr),
sk_sdp(conn)->sk_bound_dev_if,
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_advt.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_advt.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_advt.c (working copy)
@@ -102,7 +102,7 @@ struct sdpc_advt *sdp_advt_q_look(struct
{
if (list_empty(&table->head))
return NULL;
-
+
return list_entry(table->head.next, struct sdpc_advt, list);
}
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_recv.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_recv.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_recv.c (working copy)
@@ -278,7 +278,7 @@ static int sdp_post_rdma_iocb_src(struct
* if there is no more iocb space queue the it for completion
*/
if (!iocb->len)
- sdp_desc_q_put_tail(&conn->r_src,
+ sdp_desc_q_put_tail(&conn->r_src,
(struct sdpc_desc *)
sdp_iocb_q_get_head(&conn->r_pend));
@@ -487,7 +487,7 @@ int sdp_recv_flush(struct sdp_sock *conn
sdp_buff_q_size(&conn->recv_pool)));
counter -= conn->l_recv_bf;
- counter = min(counter,
+ counter = min(counter,
((s32)conn->recv_cq_size - (s32)conn->l_recv_bf));
while (counter-- > 0) {
@@ -648,7 +648,7 @@ static int sdp_recv_buff_iocb_active(str
result = sdp_read_buff_iocb(iocb, buff);
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> data copy <%d:%u> to IOCB",
- result, iocb->len,
+ result, iocb->len,
(unsigned)(buff->tail - buff->data));
sdp_iocb_q_put_head(&conn->r_snk, iocb);
@@ -692,7 +692,7 @@ static int sdp_recv_buff_iocb_pending(st
result = sdp_read_buff_iocb(iocb, buff);
if (result < 0) {
sdp_dbg_warn(conn, "Error <%d> data copy <%d:%u> to IOCB",
- result, iocb->len,
+ result, iocb->len,
(unsigned)(buff->tail - buff->data));
return result;
}
@@ -790,7 +790,7 @@ int sdp_recv_buff(struct sdp_sock *conn,
break;
if (result < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error <%d> processing IOCB. <%d:%d:%d>",
result, conn->snk_sent,
sdp_iocb_q_size(&conn->r_pend),
@@ -841,7 +841,7 @@ static int sdp_inet_read_cancel(struct k
sdp_dbg_ctrl(NULL, "Cancel Read IOCB. user <%d> key <%d> flag <%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
-
+
if (!si || !si->sock || !si->sock->sk) {
sdp_warn("Cancel empty read IOCB. users <%d> flags <%d:%08lx>",
req->ki_users, req->ki_key, req->ki_flags);
@@ -887,7 +887,7 @@ static int sdp_inet_read_cancel(struct k
result = 0;
}
-
+
goto unlock;
}
@@ -926,10 +926,10 @@ static int sdp_inet_read_cancel(struct k
* source probably will get cancel requests as well.
*/
if (!(conn->flags & SDP_CONN_F_SNK_CANCEL)) {
-
+
result = sdp_send_ctrl_snk_cancel(conn);
SDP_EXPECT(result >= 0);
-
+
conn->flags |= SDP_CONN_F_SNK_CANCEL;
}
@@ -946,7 +946,7 @@ static int sdp_inet_read_cancel(struct k
req->ki_users, req->ki_key, req->ki_flags);
result = -EAGAIN;
-
+
unlock:
sdp_conn_unlock(conn);
done:
@@ -1030,7 +1030,7 @@ static int sdp_inet_recv_urg(struct sock
if (!(flags & MSG_PEEK)) {
conn->rcv_urg_cnt -= 1;
conn->byte_strm -= 1;
-
+
SDP_CONN_STAT_RECV_INC(conn, 1);
/*
* we've potentially emptied a buffer, if
@@ -1057,7 +1057,7 @@ done:
/*
* sdp_inet_recv - recv data from the network to user space
*/
-int sdp_inet_recv(struct kiocb *req, struct socket *sock, struct msghdr *msg,
+int sdp_inet_recv(struct kiocb *req, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk;
@@ -1084,7 +1084,7 @@ int sdp_inet_recv(struct kiocb *req, st
sdp_dbg_data(conn, "state <%08x> size <%Zu> pending <%d> falgs <%08x>",
conn->state, size, conn->byte_strm, flags);
sdp_dbg_data(conn, "read IOCB <%d> addr <%p> users <%d> flags <%08lx>",
- req->ki_key, msg->msg_iov->iov_base,
+ req->ki_key, msg->msg_iov->iov_base,
req->ki_users, req->ki_flags);
/*
@@ -1239,7 +1239,7 @@ int sdp_inet_recv(struct kiocb *req, st
}
}
/*
- * urgent data needs to break up the data stream, regardless
+ * urgent data needs to break up the data stream, regardless
* of low water mark, or whether there is room in the buffer.
*/
if (oob > 0) {
@@ -1282,7 +1282,7 @@ int sdp_inet_recv(struct kiocb *req, st
result = (copied > 0) ? 0 : sock_error(sk);
break;
}
-
+
if (RCV_SHUTDOWN & conn->shutdown) {
result = 0;
break;
@@ -1332,7 +1332,7 @@ int sdp_inet_recv(struct kiocb *req, st
*/
iocb = sdp_iocb_create();
if (!iocb) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Error allocating IOCB <%Zu:%d>",
size, copied);
result = -ENOMEM;
@@ -1351,10 +1351,10 @@ int sdp_inet_recv(struct kiocb *req, st
result = sdp_iocb_lock(iocb);
if (result < 0) {
- sdp_dbg_warn(conn,
- "Error <%d> IOCB lock <%Zu:%d>",
+ sdp_dbg_warn(conn,
+ "Error <%d> IOCB lock <%Zu:%d>",
result, size, copied);
-
+
sdp_iocb_destroy(iocb);
break;
}
@@ -1362,11 +1362,11 @@ int sdp_inet_recv(struct kiocb *req, st
SDP_CONN_STAT_RQ_INC(conn, iocb->size);
sdp_iocb_q_put_tail(&conn->r_pend, iocb);
-
+
ack = 1;
copied = 0; /* copied amount was saved in IOCB. */
result = -EIOCBQUEUED;
-
+
break;
}
}
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.h
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.h (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_conn.h (working copy)
@@ -488,7 +488,7 @@ static inline void sdp_conn_put_light(st
void sdp_conn_put(struct sdp_sock *conn);
-static inline void *hashent_arg(s32 hashent)
+static inline void *hashent_arg(s32 hashent)
{
return (void *)(unsigned long)hashent;
}
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.c (working copy)
@@ -54,7 +54,7 @@ static int sdp_proc_read_parse(char *pag
#if 0
if (!*start && offset) {
- return 0; /* I'm not sure why this always gets
+ return 0; /* I'm not sure why this always gets
called twice... */
}
#endif
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_pass.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_pass.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_pass.c (working copy)
@@ -46,7 +46,7 @@ int sdp_cm_pass_establish(struct sdp_soc
int result;
sdp_dbg_ctrl(conn, "Passive Establish src <%08x:%04x> dst <%08x:%04x>",
- conn->src_addr, conn->src_port,
+ conn->src_addr, conn->src_port,
conn->dst_addr, conn->dst_port);
/*
* free hello ack message
@@ -162,7 +162,7 @@ static int sdp_cm_accept(struct sdp_sock
*/
sdp_buff_q_put_tail(&conn->send_post, buff);
/*
- * modify QP. INIT->RTR
+ * modify QP. INIT->RTR
*/
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr) {
@@ -174,7 +174,7 @@ static int sdp_cm_accept(struct sdp_sock
memset(qp_attr, 0, sizeof(*qp_attr));
qp_attr->qp_state = IB_QPS_RTR;
-
+
result = ib_cm_init_qp_attr(conn->cm_id, qp_attr, &qp_mask);
if (result) {
sdp_dbg_warn(conn, "Error <%d> QP attributes for RTR",
@@ -188,7 +188,7 @@ static int sdp_cm_accept(struct sdp_sock
result = ib_modify_qp(conn->qp, qp_attr, qp_mask);
kfree(qp_attr);
-
+
if (result) {
sdp_dbg_warn(conn, "Error <%d> modifying QP to RTR.", result);
goto error;
@@ -343,7 +343,7 @@ static int sdp_cm_hello_check(struct sdp
msg_hello->hh.port,
msg_hello->hh.src.ipv4.addr,
msg_hello->hh.dst.ipv4.addr);
-
+
return 0; /* success */
}
@@ -357,10 +357,10 @@ int sdp_cm_req_handler(struct ib_cm_id *
u16 port;
u32 addr;
- sdp_dbg_ctrl(NULL,
+ sdp_dbg_ctrl(NULL,
"CM REQ. comm <%08x> SID <%016llx> ca <%s> port <%d>",
cm_id->local_id, (unsigned long long)cm_id->service_id,
- event->param.req_rcvd.device->name,
+ event->param.req_rcvd.device->name,
event->param.req_rcvd.port);
/*
* check Hello Header, to determine if we want the connection.
@@ -378,7 +378,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
* first find a listening connection, and check backlog
*/
result = -ECONNREFUSED;
-
+
listen_conn = sdp_inet_listen_lookup(addr, port);
if (!listen_conn) {
/*
@@ -395,7 +395,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
goto done;
if (listen_conn->backlog_cnt > listen_conn->backlog_max) {
- sdp_dbg_ctrl(listen_conn,
+ sdp_dbg_ctrl(listen_conn,
"Listen backlog <%d> too big to accept new conn",
listen_conn->backlog_cnt);
goto done;
@@ -437,7 +437,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
conn->send_size = min((u16)sdp_buff_pool_buff_size(),
(u16)conn->send_size) - SDP_MSG_HDR_SIZE;
- memcpy(&conn->d_gid,
+ memcpy(&conn->d_gid,
&event->param.req_rcvd.remote_ca_guid,
sizeof(conn->d_gid));
/*
@@ -448,7 +448,7 @@ int sdp_cm_req_handler(struct ib_cm_id *
/*
* associate connection with a hca/port, and allocate IB.
*/
- result = sdp_conn_alloc_ib(conn,
+ result = sdp_conn_alloc_ib(conn,
event->param.req_rcvd.device,
event->param.req_rcvd.port,
event->param.req_rcvd.primary_path->pkey);
@@ -502,7 +502,7 @@ done:
sdp_conn_put(listen_conn); /* ListenLookup reference. */
empty:
(void)ib_send_cm_rej(cm_id,
- IB_CM_REJ_CONSUMER_DEFINED,
+ IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
return result;
}
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.h
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.h (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_proc.h (working copy)
@@ -64,7 +64,7 @@ struct sdpc_proc_ent {
struct proc_dir_entry *entry;
int (*read)(char *buffer,
int max_size,
- off_t start,
+ off_t start,
long *end);
};
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_sent.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_sent.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_sent.c (working copy)
@@ -162,7 +162,7 @@ int sdp_event_send(struct sdp_sock *conn
/*
* error
*/
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Send wrid mismatch. <%llu:%llu:%d>",
(unsigned long long)comp->wr_id,
(unsigned long long)buff->wrid,
@@ -256,7 +256,7 @@ int sdp_event_send(struct sdp_sock *conn
sdp_buff_pool_chain_put(head, free_count);
if (free_count <= 0 || conn->send_usig < 0) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"Send processing mismatch. <%llu:%llu:%d:%d>",
(unsigned long long)comp->wr_id,
(unsigned long long)current_wrid,
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_iocb.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_iocb.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_iocb.c (working copy)
@@ -58,17 +58,17 @@ static void do_iocb_unlock(struct sdpc_i
iocb->addr, iocb->size);
while (vma) {
- sdp_dbg_data(NULL,
+ sdp_dbg_data(NULL,
"unmark <%lx> <%p> <%08lx:%08lx> <%08lx> <%ld>",
iocb->addr, vma, vma->vm_start, vma->vm_end,
vma->vm_flags, (long)vma->vm_private_data);
-
+
spin_lock(&iocb->mm->page_table_lock);
/*
* if there are no more references to the vma
*/
vma->vm_private_data--;
-
+
if (!vma->vm_private_data) {
/*
* modify VM flags.
@@ -78,7 +78,7 @@ static void do_iocb_unlock(struct sdpc_i
* adjust locked page count
*/
vma->vm_mm->locked_vm -= ((vma->vm_end -
- vma->vm_start) >>
+ vma->vm_start) >>
PAGE_SHIFT);
}
@@ -107,7 +107,7 @@ void sdp_iocb_unlock(struct sdpc_iocb *i
* spin lock since this could be from interrupt context.
*/
down_write(&iocb->mm->mmap_sem);
-
+
do_iocb_unlock(iocb);
up_write(&iocb->mm->mmap_sem);
@@ -152,7 +152,7 @@ static int sdp_iocb_page_save(struct sdp
if (!iocb->addr_array)
goto err_addr;
- iocb->page_array = kmalloc((sizeof(struct page *) * iocb->page_count),
+ iocb->page_array = kmalloc((sizeof(struct page *) * iocb->page_count),
GFP_KERNEL);
if (!iocb->page_array)
goto err_page;
@@ -182,13 +182,13 @@ static int sdp_iocb_page_save(struct sdp
pud = pud_offset(pgd, addr);
if (!pud || pud_none(*pud))
break;
-
+
pmd = pmd_offset(pud, addr);
if (!pmd || pmd_none(*pmd))
break;
ptep = pte_offset_map(pmd, addr);
- if (!ptep)
+ if (!ptep)
break;
pte = *ptep;
@@ -200,7 +200,7 @@ static int sdp_iocb_page_save(struct sdp
pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
break;
-
+
page = pfn_to_page(pfn);
iocb->page_array[counter] = page;
@@ -208,7 +208,7 @@ static int sdp_iocb_page_save(struct sdp
}
spin_unlock(&iocb->mm->page_table_lock);
-
+
if (size > 0) {
result = -EFAULT;
goto err_find;
@@ -216,7 +216,7 @@ static int sdp_iocb_page_save(struct sdp
return 0;
err_find:
-
+
kfree(iocb->page_array);
iocb->page_array = NULL;
err_page:
@@ -239,7 +239,7 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
int result = -ENOMEM;
unsigned long addr;
size_t size;
-
+
/*
* mark IOCB as locked. We do not take a reference on the mm, AIO
* handles this for us.
@@ -251,7 +251,7 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
*/
real_cap = cap_t(current->cap_effective);
cap_raise(current->cap_effective, CAP_IPC_LOCK);
-
+
size = PAGE_ALIGN(iocb->size + (iocb->addr & ~PAGE_MASK));
addr = iocb->addr & PAGE_MASK;
@@ -271,13 +271,13 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
*/
if (result) {
sdp_dbg_err("VMA lock <%lx:%Zu> error <%d> <%d:%lu:%lu>",
- iocb->addr, iocb->size, result,
+ iocb->addr, iocb->size, result,
iocb->page_count, iocb->mm->locked_vm, limit);
goto err_lock;
}
/*
* look up the head of the vma queue, loop through the vmas, marking
- * them do not copy, reference counting, and saving them.
+ * them do not copy, reference counting, and saving them.
*/
vma = find_vma(iocb->mm, addr);
if (!vma)
@@ -296,13 +296,13 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
if (PAGE_SIZE < (unsigned long)vma->vm_private_data)
sdp_dbg_err("VMA: private daya in use! <%08lx>",
(unsigned long)vma->vm_private_data);
-
+
vma->vm_flags |= VM_DONTCOPY;
vma->vm_private_data++;
spin_unlock(&iocb->mm->page_table_lock);
- sdp_dbg_data(NULL,
+ sdp_dbg_data(NULL,
"mark <%lx> <0x%p> <%08lx:%08lx> <%08lx> <%ld>",
iocb->addr, vma, vma->vm_start, vma->vm_end,
vma->vm_flags, (long)vma->vm_private_data);
@@ -315,7 +315,7 @@ int sdp_iocb_lock(struct sdpc_iocb *iocb
result = sdp_iocb_page_save(iocb);
if (result) {
- sdp_dbg_err("Error <%d> saving pages for IOCB <%lx:%Zu>",
+ sdp_dbg_err("Error <%d> saving pages for IOCB <%lx:%Zu>",
result, iocb->addr, iocb->size);
goto err_save;
}
@@ -362,9 +362,9 @@ static int sdp_mem_lock_init(void)
struct kallsym_iter *iter;
loff_t pos = 0;
int ret = -EINVAL;
-
+
sdp_dbg_init("Memory Locking initialization.");
-
+
kallsyms = filp_open("/proc/kallsyms", O_RDONLY, 0);
if (!kallsyms) {
sdp_warn("Failed to open /proc/kallsyms");
@@ -444,7 +444,7 @@ int sdp_iocb_register(struct sdpc_iocb *
iocb->page_offset);
goto error;
}
-
+
iocb->l_key = iocb->mem->fmr->lkey;
iocb->r_key = iocb->mem->fmr->rkey;
/*
@@ -501,10 +501,10 @@ static void do_iocb_complete(void *arg)
value = (iocb->post > 0) ? iocb->post : iocb->status;
sdp_dbg_data(NULL, "IOCB complete. <%d:%d:%08lx> value <%ld>",
- iocb->req->ki_users, iocb->req->ki_key,
+ iocb->req->ki_users, iocb->req->ki_key,
iocb->req->ki_flags, value);
/*
- * valid result can be 0 or 1 for complete so
+ * valid result can be 0 or 1 for complete so
* we ignore the value.
*/
(void)aio_complete(iocb->req, value, 0);
@@ -520,7 +520,7 @@ static void do_iocb_complete(void *arg)
void sdp_iocb_complete(struct sdpc_iocb *iocb, ssize_t status)
{
iocb->status = status;
-
+
if (in_atomic() || irqs_disabled()) {
INIT_WORK(&iocb->completion, do_iocb_complete, (void *)iocb);
schedule_work(&iocb->completion);
@@ -684,7 +684,7 @@ static struct sdpc_iocb *sdp_iocb_q_get(
/*
* sdp_iocb_q_put - put the IOCB object at the tables tail
*/
-static void sdp_iocb_q_put(struct sdpc_iocb_q *table,
+static void sdp_iocb_q_put(struct sdpc_iocb_q *table,
struct sdpc_iocb *iocb,
int head)
{
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_event.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_event.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_event.c (working copy)
@@ -48,7 +48,7 @@ int sdp_cq_event_locked(struct ib_wc *co
if (SDP_ST_MASK_CLOSED & conn->state) {
/*
- * Ignore events in closed state, connection is being
+ * Ignore events in closed state, connection is being
* terminated, connection cleanup will take care of freeing
* posted buffers.
*/
@@ -268,7 +268,7 @@ static int sdp_cm_established(struct ib_
*/
result = ib_send_cm_dreq(conn->cm_id, NULL, 0);
if (result) {
- sdp_dbg_warn(conn, "Error <%d> sending CM DREQ",
+ sdp_dbg_warn(conn, "Error <%d> sending CM DREQ",
result);
goto error;
}
@@ -357,7 +357,7 @@ static int sdp_cm_timewait(struct ib_cm_
*/
case SDP_CONN_ST_ESTABLISHED:
/*
- * Change state, so we only need to wait for the abort
+ * Change state, so we only need to wait for the abort
* callback, and idle. Call the abort callback.
*/
SDP_CONN_ST_SET(conn, SDP_CONN_ST_TIME_WAIT_2);
@@ -394,7 +394,7 @@ int sdp_cm_event_handler(struct ib_cm_id
sdp_conn_lock(conn);
else
if (cm_id->state != IB_CM_REQ_RCVD) {
- sdp_dbg_warn(NULL,
+ sdp_dbg_warn(NULL,
"No conn <%d> CM state <%d> event <%d>",
hashent, cm_id->state, event->event);
return -EINVAL;
@@ -430,7 +430,7 @@ int sdp_cm_event_handler(struct ib_cm_id
*/
if (conn) {
if (result < 0 && event->event != IB_CM_TIMEWAIT_EXIT) {
- sdp_dbg_warn(conn,
+ sdp_dbg_warn(conn,
"CM state <%d> event <%d> error <%d>",
cm_id->state, event->event, result);
/*
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.c (working copy)
@@ -125,7 +125,7 @@ static inline struct sdpc_buff *sdp_buff
/*
* do_buff_q_remove - remove a specific buffer from a specific pool
*/
-static inline void do_buff_q_remove(struct sdpc_buff_q *pool,
+static inline void do_buff_q_remove(struct sdpc_buff_q *pool,
struct sdpc_buff *buff)
{
struct sdpc_buff *prev;
@@ -382,7 +382,7 @@ static int sdp_buff_pool_alloc(struct sd
kmem_cache_free(m_pool->buff_cache, buff);
break;
}
-
+
buff->end = buff->head + PAGE_SIZE;
buff->data = buff->head;
buff->tail = buff->head;
@@ -513,7 +513,7 @@ void sdp_buff_pool_destroy(void)
* Sanity check that the current number of buffers was released.
*/
if (main_pool->buff_cur)
- sdp_warn("Leaking buffers during cleanup. <%d>",
+ sdp_warn("Leaking buffers during cleanup. <%d>",
main_pool->buff_cur);
/*
* free pool cache
@@ -719,7 +719,7 @@ int sdp_proc_dump_buff_pool(char *buffer
spin_lock_irqsave(&main_pool->lock, flags);
if (!start_index) {
- offset += sprintf((buffer + offset),
+ offset += sprintf((buffer + offset),
" buffer size: %8d\n",
main_pool->buff_size);
offset += sprintf((buffer + offset),
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_queue.c
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_queue.c (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_queue.c (working copy)
@@ -144,7 +144,7 @@ void sdp_desc_q_remove(struct sdpc_desc
/*
* sdp_desc_q_lookup - search and return an element from the table
*/
-struct sdpc_desc *sdp_desc_q_lookup(struct sdpc_desc_q *table,
+struct sdpc_desc *sdp_desc_q_lookup(struct sdpc_desc_q *table,
int (*lookup)(struct sdpc_desc *element,
void *arg),
void *arg)
@@ -214,7 +214,7 @@ int sdp_desc_q_type_head(struct sdpc_des
/*
* sdp_desc_q_look_type_head - look at a specific object
*/
-struct sdpc_desc *sdp_desc_q_look_type_head(struct sdpc_desc_q *table,
+struct sdpc_desc *sdp_desc_q_look_type_head(struct sdpc_desc_q *table,
enum sdp_desc_type type)
{
if (!table->head)
Index: linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.h
===================================================================
--- linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.h (revision 3032)
+++ linux-kernel/drivers/infiniband/ulp/sdp/sdp_buff.h (working copy)
@@ -69,7 +69,7 @@ struct sdpc_buff {
u32 data_size; /* size of just data in the buffer */
u64 wrid; /* IB work request ID */
/*
- * IB specific data (The main buffer pool sets the lkey when
+ * IB specific data (The main buffer pool sets the lkey when
* it is created)
*/
struct ib_sge sge;
--
MST