[openib-general] [PATCH] kill ib_legacy.h
Roland Dreier
roland at topspin.com
Mon Aug 9 21:24:13 PDT 2004
The coup de grâce...
Index: src/linux-kernel/infiniband/ulp/dapl/khash.c
===================================================================
--- src/linux-kernel/infiniband/ulp/dapl/khash.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/dapl/khash.c (working copy)
@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
-#include "ib_legacy_types.h"
#include "ts_kernel_trace.h"
#include "khash.h"
Index: src/linux-kernel/infiniband/ulp/dapl/khash.h
===================================================================
--- src/linux-kernel/infiniband/ulp/dapl/khash.h (revision 619)
+++ src/linux-kernel/infiniband/ulp/dapl/khash.h (working copy)
@@ -24,8 +24,6 @@
#ifndef _KHASH_H
#define _KHASH_H
-#include <ib_legacy_types.h>
-
#define HASH_KEY_SIZE 40
/*
Index: src/linux-kernel/infiniband/ulp/dapl/udapl_mod.c
===================================================================
--- src/linux-kernel/infiniband/ulp/dapl/udapl_mod.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/dapl/udapl_mod.c (working copy)
@@ -39,7 +39,6 @@
#include "vapi_common.h"
#include "evapi.h"
-#include "ib_legacy_types.h"
#include "ts_kernel_trace.h"
#include <ib_verbs.h>
#include "ts_ib_sa_client.h"
Index: src/linux-kernel/infiniband/ulp/dapl/udapl_mod.h
===================================================================
--- src/linux-kernel/infiniband/ulp/dapl/udapl_mod.h (revision 619)
+++ src/linux-kernel/infiniband/ulp/dapl/udapl_mod.h (working copy)
@@ -28,7 +28,6 @@
#include <vapi_types.h>
-#include "ib_legacy_types.h"
#include <ib_verbs.h>
#define UDAPL_DEVNAME "udapl"
Index: src/linux-kernel/infiniband/ulp/ipoib/ip2pr_priv.h
===================================================================
--- src/linux-kernel/infiniband/ulp/ipoib/ip2pr_priv.h (revision 614)
+++ src/linux-kernel/infiniband/ulp/ipoib/ip2pr_priv.h (working copy)
@@ -26,7 +26,6 @@
/*
* topspin generic includes
*/
-#include <ib_legacy_types.h>
#include <trace_codes.h>
#include <trace_masks.h>
#include <ts_kernel_trace.h>
@@ -66,7 +65,6 @@
/*
* topspin IB includes
*/
-#include <ib_legacy_types.h>
#include <ts_ib_cm.h>
#include <ts_ib_sa_client.h>
#include <ts_kernel_trace.h>
Index: src/linux-kernel/infiniband/ulp/ipoib/ip2pr_export.h
===================================================================
--- src/linux-kernel/infiniband/ulp/ipoib/ip2pr_export.h (revision 614)
+++ src/linux-kernel/infiniband/ulp/ipoib/ip2pr_export.h (working copy)
@@ -24,8 +24,6 @@
#ifndef _TS_IP2PR_EXPORT_H
#define _TS_IP2PR_EXPORT_H
-#include <ib_legacy_types.h>
-
/* ------------------------------------------------------------------------- */
/* kernel */
/* ------------------------------------------------------------------------- */
Index: src/linux-kernel/infiniband/ulp/ipoib/ipoib_proto.h
===================================================================
--- src/linux-kernel/infiniband/ulp/ipoib/ipoib_proto.h (revision 576)
+++ src/linux-kernel/infiniband/ulp/ipoib/ipoib_proto.h (working copy)
@@ -25,7 +25,6 @@
#define _IPOIB_PROTO_H
#include <linux/netdevice.h>
-#include <ib_legacy_types.h>
#include <ib_verbs.h>
/* ------------------------------------------------------------------------- */
Index: src/linux-kernel/infiniband/ulp/srp/srp_host.c
===================================================================
--- src/linux-kernel/infiniband/ulp/srp/srp_host.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/srp/srp_host.c (working copy)
@@ -154,7 +154,7 @@
.use_clustering = ENABLE_CLUSTERING,
};
-static int scsi_unload_in_progress = FALSE;
+static int scsi_unload_in_progress = 0;
static unsigned long connection_timeout = 0;
static struct pci_dev *hca_pdev;
@@ -279,7 +279,7 @@
driver_params.num_connections++;
port->num_connections++;
target->ioc->num_connections++;
- target->hard_reject = FALSE;
+ target->hard_reject = 0;
target->hard_reject_count = 0;
target->active_conn = s;
@@ -293,7 +293,7 @@
s->port = port;
s->state = SRP_HOST_LOGIN_INPROGRESS;
- s->redirected = FALSE;
+ s->redirected = 0;
s->path_record_tid = 0;
srp_host_login(s);
@@ -323,7 +323,7 @@
target->active_conn = NULL;
target->timeout = jiffies + connection_timeout;
target->state = TARGET_POTENTIAL_CONNECTION;
- target->need_device_reset = TRUE;
+ target->need_device_reset = 1;
spin_unlock_irqrestore(&target->spin_lock, cpu_flags);
}
@@ -332,14 +332,14 @@
{
srp_target_t *target = s->target;
unsigned long cpu_flags;
- int force_close = FALSE;
+ int force_close = 0;
spin_lock_irqsave(&target->spin_lock, cpu_flags);
if (s->state == SRP_HOST_LOGIN_INPROGRESS) {
driver_params.num_pending_connections--;
} else if (s->state == SRP_HOST_GET_PATH_RECORD) {
- force_close = TRUE;
+ force_close = 1;
driver_params.num_pending_connections--;
} else if (s->state == SRP_UP) {
driver_params.num_active_connections--;
@@ -352,8 +352,8 @@
if (target->state == TARGET_POTENTIAL_CONNECTION)
target->timeout = jiffies + connection_timeout;
target->active_conn = NULL;
- target->need_disconnect = FALSE;
- target->hard_reject = FALSE;
+ target->need_disconnect = 0;
+ target->hard_reject = 0;
s->state = SRP_HOST_LOGOUT_INPROGRESS;
@@ -398,17 +398,17 @@
spin_lock_irqsave(&target->spin_lock, cpu_flags);
- if (srp_pkt->in_use == FALSE) {
+ if (!srp_pkt->in_use) {
TS_REPORT_STAGE(MOD_SRPTP, "srp_pkt already free %d",
srp_pkt->pkt_index);
- } else if (srp_pkt->in_use == TRUE) {
+ } else if (srp_pkt->in_use) {
srp_pkt->scatter_gather_list.address =
(u64) (unsigned long)srp_pkt->data;
srp_pkt->scatter_gather_list.length = srp_cmd_pkt_size;
- srp_pkt->in_use = FALSE;
+ srp_pkt->in_use = 0;
srp_pkt->conn = INVALID_CONN_HANDLE;
srp_pkt->next = target->srp_pkt_free_list;
@@ -429,17 +429,17 @@
} else {
target = (srp_target_t *) srp_pkt->target;
- if (srp_pkt->in_use == FALSE) {
+ if (!srp_pkt->in_use) {
TS_REPORT_STAGE(MOD_SRPTP, "srp_pkt already free %d",
srp_pkt->pkt_index);
- } else if (srp_pkt->in_use == TRUE) {
+ } else if (srp_pkt->in_use) {
srp_pkt->scatter_gather_list.address =
(u64) (unsigned long)srp_pkt->data;
srp_pkt->scatter_gather_list.length = srp_cmd_pkt_size;
- srp_pkt->in_use = FALSE;
+ srp_pkt->in_use = 0;
srp_pkt->conn = INVALID_CONN_HANDLE;
srp_pkt->next = target->srp_pkt_free_list;
@@ -479,7 +479,7 @@
atomic_dec(&target->free_pkt_counter);
srp_pkt->next = NULL;
- srp_pkt->in_use = TRUE;
+ srp_pkt->in_use = 1;
}
spin_unlock_irqrestore(&target->spin_lock, cpu_flags);
@@ -514,7 +514,7 @@
}
}
- return (TS_SUCCESS);
+ return 0;
}
/*
@@ -558,7 +558,7 @@
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
target->cqs_hndl[hca_index] = ib_create_cq(hca->ca_hndl,
@@ -656,7 +656,7 @@
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
if (target->srp_pkt_data_mhndl[hca_index])
@@ -685,13 +685,13 @@
target = &srp_targets[target_index];
- if (target->valid == TRUE) {
+ if (target->valid) {
for (hca_index = 0; hca_index < MAX_HCAS; hca_index++) {
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
ib_dereg_mr(target->srp_pkt_data_mhndl[hca_index]);
@@ -720,10 +720,10 @@
target->target_index = target - &srp_targets[0];
target->state = TARGET_POTENTIAL_CONNECTION;
target->timeout = jiffies + TARGET_POTENTIAL_STARTUP_TIMEOUT;
- target->valid = FALSE;
- target->need_disconnect = FALSE;
- target->need_device_reset = FALSE;
- target->hard_reject = FALSE;
+ target->valid = 0;
+ target->need_disconnect = 0;
+ target->need_device_reset = 0;
+ target->hard_reject = 0;
target->hard_reject_count = 0;
INIT_LIST_HEAD(&target->conn_list);
@@ -982,7 +982,7 @@
if (((target->state > TARGET_INITIALIZED) &&
(target->state < TARGET_ACTIVE_CONNECTION)) &&
- (target->valid == TRUE) && (target->active_conn == NULL)) {
+ (target->valid) && (target->active_conn == NULL)) {
TS_REPORT_STAGE(MOD_SRPTP,
"target %d, no active connection",
@@ -1007,7 +1007,7 @@
port = &hca_params[hca_index].port[port_index];
- port->dm_need_query = TRUE;
+ port->dm_need_query = 1;
}
}
}
@@ -1055,7 +1055,7 @@
int port_index, hca_index;
int dm_query_sum = 0;
unsigned long dm_query_filter_timeout = 0;
- int sweep_targets_for_connections = FALSE;
+ int sweep_targets_for_connections = 0;
srp_target_t *target;
srp_host_hca_params_t *hca;
srp_host_port_params_t *port;
@@ -1088,18 +1088,18 @@
port =
&hca_params[hca_index].
port[port_index];
- if (port->valid == FALSE)
+ if (!port->valid)
break;
srp_port_query_cancel(port);
if (port->out_of_service_xid != 0)
srp_register_out_of_service
- (port, FALSE);
+ (port, 0);
if (port->in_service_xid != 0)
srp_register_in_service(port,
- FALSE);
+ 0);
if (port->dm_query_in_progress) {
TS_REPORT_STAGE(MOD_SRP,
@@ -1123,7 +1123,7 @@
*/
if (driver_params.need_refresh) {
- driver_params.need_refresh = FALSE;
+ driver_params.need_refresh = 0;
/*
* Refresh our local port information
@@ -1147,7 +1147,7 @@
port = &hca_params[hca_index].port[port_index];
- if (port->valid == FALSE)
+ if (!port->valid)
break;
if (port->dm_query_in_progress) {
@@ -1155,14 +1155,14 @@
continue;
}
- if (port->dm_need_query == FALSE)
+ if (!port->dm_need_query)
continue;
if (port->out_of_service_xid == 0)
- srp_register_out_of_service(port, TRUE);
+ srp_register_out_of_service(port, 1);
if (port->in_service_xid == 0)
- srp_register_in_service(port, TRUE);
+ srp_register_in_service(port, 1);
port->dm_retry_count = 0;
@@ -1185,8 +1185,8 @@
"Number of active dm_queries %d",
dm_query_sum);
- sweep_targets_for_connections = TRUE;
- driver_params.port_query = FALSE;
+ sweep_targets_for_connections = 1;
+ driver_params.port_query = 0;
if (dm_query_filter_timeout == 0)
dm_query_filter_timeout =
@@ -1204,12 +1204,12 @@
sweep_targets();
}
- driver_params.dm_active = TRUE;
+ driver_params.dm_active = 1;
- } else if (sweep_targets_for_connections == TRUE) {
- sweep_targets_for_connections = FALSE;
+ } else if (sweep_targets_for_connections) {
+ sweep_targets_for_connections = 0;
dm_query_filter_timeout = 0;
- driver_params.dm_active = FALSE;
+ driver_params.dm_active = 0;
sweep_targets();
}
@@ -1229,7 +1229,7 @@
* Cleanup various disconnect/reconnect methods into
* one method
*/
- if (target->need_disconnect == TRUE) {
+ if (target->need_disconnect) {
remove_connection(target->active_conn,
TARGET_POTENTIAL_CONNECTION);
@@ -1238,17 +1238,17 @@
initialize_connection(target);
- target->need_device_reset = FALSE;
- target->hard_reject = FALSE;
+ target->need_device_reset = 0;
+ target->hard_reject = 0;
}
- if (target->need_device_reset == TRUE) {
+ if (target->need_device_reset) {
struct list_head *conn_entry;
srp_host_conn_t *conn;
- target->need_device_reset = FALSE;
- target->need_disconnect = FALSE;
- target->hard_reject = FALSE;
+ target->need_device_reset = 0;
+ target->need_disconnect = 0;
+ target->hard_reject = 0;
list_for_each(conn_entry, &target->conn_list) {
@@ -1268,12 +1268,12 @@
}
}
- if ((target->hard_reject == TRUE)
+ if ((target->hard_reject)
&& (target->active_conn)) {
- target->need_device_reset = FALSE;
- target->need_disconnect = FALSE;
- target->hard_reject = FALSE;
+ target->need_device_reset = 0;
+ target->need_disconnect = 0;
+ target->hard_reject = 0;
if (target->hard_reject_count++ <
MAX_HARD_REJECT_COUNT) {
@@ -1306,7 +1306,7 @@
srp_host_close_conn(conn);
- srp_dm_kill_ioc(target, FALSE);
+ srp_dm_kill_ioc(target, 0);
pick_connection_path(target);
}
@@ -1368,7 +1368,7 @@
for (hca = &hca_params[0]; hca < &hca_params[MAX_HCAS]; hca++) {
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
/*
@@ -1546,12 +1546,12 @@
port_index++) {
if (hca_params[hca_index].port[port_index].valid) {
hca_params[hca_index].port[port_index].
- dm_need_query = TRUE;
+ dm_need_query = 1;
}
}
}
- driver_params.need_refresh = TRUE;
+ driver_params.need_refresh = 1;
err = tsKernelThreadStart("ts_srp_dm",
srp_dm_poll_thread,
@@ -1586,7 +1586,7 @@
* (1) timeout to expire
*/
} while (((driver_params.num_active_local_ports == 0) ||
- (driver_params.dm_active == TRUE) ||
+ (driver_params.dm_active) ||
(driver_params.num_pending_connections != 0)) &&
(connections_timeout < (srp_discovery_timeout * HZ)));
@@ -1637,12 +1637,12 @@
int len = 0;
int i;
srp_target_t *target;
- int not_first_entry = FALSE;
+ int not_first_entry = 0;
u8 *gid;
u8 *ioc_guid;
char *buf;
- if (inout == TRUE) {
+ if (inout) {
/* write to proc interface, redistribute connections */
if (!buffer || length >= PAGE_SIZE) {
return (-EINVAL);
@@ -1680,8 +1680,7 @@
target = &srp_targets[i];
- if ((target->valid == FALSE)
- || (target->state != TARGET_ACTIVE_CONNECTION))
+ if (!target->valid || target->state != TARGET_ACTIVE_CONNECTION)
continue;
gid = target->port->local_gid;
@@ -1736,13 +1735,13 @@
for (target = &srp_targets[0];
target < &srp_targets[max_srp_targets]; target++) {
- if (target->valid == TRUE) {
+ if (target->valid) {
/* don't print colon on first guy */
- if (not_first_entry == TRUE) {
+ if (not_first_entry) {
len += sprintf(&buffer[len], ":");
} else {
- not_first_entry = TRUE;
+ not_first_entry = 1;
}
len += sprintf(&buffer[len], "%llx.%x",
@@ -2455,9 +2454,8 @@
/* send the srp packet */
status = srptp_post_recv(recv_pkt);
- if (status == TS_SUCCESS) {
+ if (!status)
status = srptp_post_send(send_pkt);
- }
if (status) {
/* we have a problem posting, disconnect, never should happen */
@@ -2491,7 +2489,7 @@
target->target_index, status);
target->state = TARGET_POTENTIAL_CONNECTION;
- target->need_disconnect = TRUE;
+ target->need_disconnect = 1;
spin_unlock_irqrestore(&target->spin_lock, cpu_flags);
@@ -2499,10 +2497,10 @@
}
SEND_SUCCESS:
- return (TS_SUCCESS);
+ return 0;
SEND_FAIL:
- return (TS_FAIL);
+ return -1;
}
#if 1
@@ -3499,7 +3497,7 @@
}
s->state = SRP_HOST_LOGIN_INPROGRESS;
- s->redirected = TRUE;
+ s->redirected = 1;
srptp_connect(s,
redirected_path_record,
(__u8 *) s->login_buff, s->login_buff_len);
@@ -3624,7 +3622,7 @@
"SRP Target rejected for target %d, redirect %d",
target->target_index, s->redirected);
- target->hard_reject = TRUE;
+ target->hard_reject = 1;
break;
@@ -3761,7 +3759,7 @@
target->target_index);
target->state = TARGET_POTENTIAL_CONNECTION;
- target->need_disconnect = TRUE;
+ target->need_disconnect = 1;
}
spin_unlock_irqrestore(&target->spin_lock, cpu_flags);
@@ -3813,7 +3811,7 @@
/* First unregister, the scsi driver, set a flag to indicate
* to the abort code to complete aborts immediately */
- scsi_unload_in_progress = TRUE;
+ scsi_unload_in_progress = 1;
tsKernelThreadStop(driver_params.thread);
Index: src/linux-kernel/infiniband/ulp/srp/srp_dm.c
===================================================================
--- src/linux-kernel/infiniband/ulp/srp/srp_dm.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/srp/srp_dm.c (working copy)
@@ -36,13 +36,13 @@
for (i = 0; i < MAX_IOCS; i++) {
if ((memcmp(ioc_table[i].guid, guid, sizeof(tTS_IB_GUID)) == 0)
- && (ioc_table[i].valid == TRUE)) {
+ && (ioc_table[i].valid == 1)) {
/* we have a match, return IOC index */
*ioc_index = i;
- return (TS_SUCCESS);
+ return 0;
}
}
- return (TS_FAIL);
+ return -1;
}
int srp_new_ioc(tTS_IB_GUID guid, int *ioc_index)
@@ -50,19 +50,19 @@
int i;
for (i = 0; i < MAX_IOCS; i++) {
- if (ioc_table[i].valid == FALSE) {
+ if (!ioc_table[i].valid) {
TS_REPORT_STAGE(MOD_SRPTP,
"Creating IOC Entry %d for 0x%llx", i,
be64_to_cpu(*(u64 *) guid));
memcpy(ioc_table[i].guid, guid, sizeof(tTS_IB_GUID));
- ioc_table[i].valid = TRUE;
+ ioc_table[i].valid = 1;
*ioc_index = i;
- return (TS_SUCCESS);
+ return 0;
}
}
- return (TS_FAIL);
+ return -1;
}
static void srp_check_ioc_paths(ioc_entry_t * ioc)
@@ -78,24 +78,24 @@
spin_lock_irqsave(&driver_params.spin_lock, cpu_flags);
- path_available = FALSE;
+ path_available = 0;
/* check if it has any valid paths */
for (hca_index = 0; hca_index < MAX_HCAS; hca_index++) {
for (port_index = 0; port_index < MAX_LOCAL_PORTS_PER_HCA;
port_index++) {
- if (ioc->path_valid[hca_index][port_index] == TRUE) {
+ if (ioc->path_valid[hca_index][port_index]) {
/*
* we have at least one path, lets keep the
* IOC
*/
- path_available = TRUE;
+ path_available = 1;
}
}
}
- if (path_available == FALSE) {
+ if (!path_available) {
TS_REPORT_WARN(MOD_SRPTP, "IOC GUID %llx, no available paths",
be64_to_cpu(*(u64 *) ioc->guid));
@@ -103,14 +103,14 @@
* no paths available to this IOC, let's remove it from our
* list
*/
- ioc->valid = FALSE;
+ ioc->valid = 0;
/* loop through all targets, and indicate that this
* ioc_index is not available as a path */
for (target = &srp_targets[0];
target < &srp_targets[max_srp_targets]; target++) {
- target->ioc_mask[ioc_index] = FALSE;
- target->ioc_needs_request[ioc_index] = FALSE;
+ target->ioc_mask[ioc_index] = 0;
+ target->ioc_needs_request[ioc_index] = 0;
}
}
@@ -131,15 +131,15 @@
* the difference is if we get a IOU connection failure versus
* a redirected connection failure
*/
- if (flag == TRUE) {
+ if (flag) {
/*
* this will not cause the path to be lost,
* just asks connection hunt code to skip this IOC
* for this target
*/
- target->ioc_needs_request[ioc - &ioc_table[0]] = FALSE;
+ target->ioc_needs_request[ioc - &ioc_table[0]] = 0;
} else {
- ioc->path_valid[hca_index][port_index] = FALSE;
+ ioc->path_valid[hca_index][port_index] = 0;
srp_check_ioc_paths(ioc);
}
@@ -154,7 +154,7 @@
ioc_entry = &ioc_table[ioc_index];
/* check if the entry is valid */
- if (ioc_entry->valid == FALSE)
+ if (!ioc_entry->valid)
continue;
srp_check_ioc_paths(ioc_entry);
@@ -175,8 +175,8 @@
* Search for IOC guid with lowest connections first
*/
for (ioc_index = 0; ioc_index < MAX_IOCS; ioc_index++) {
- if ((target->ioc_mask[ioc_index] == TRUE) &&
- (target->ioc_needs_request[ioc_index] == TRUE)) {
+ if ((target->ioc_mask[ioc_index]) &&
+ (target->ioc_needs_request[ioc_index])) {
if (ioc_table[ioc_index].num_connections <
connection_count) {
lowest_ioc_entry = &ioc_table[ioc_index];
@@ -203,10 +203,8 @@
* check if the port is valid, the port is up
* and if the IOC can be seen through this port
*/
- if ((port->valid == TRUE) &&
- (lowest_ioc_entry->
- path_valid[hca_index][port_index] ==
- TRUE)) {
+ if (port->valid &&
+ lowest_ioc_entry->path_valid[hca_index][port_index]) {
if (port->num_connections <
connection_count) {
lowest_port = port;
@@ -295,8 +293,8 @@
}
if (port->dm_query_in_progress) {
- port->dm_query_in_progress = FALSE;
- port->dm_need_query = FALSE;
+ port->dm_query_in_progress = 0;
+ port->dm_need_query = 0;
TS_REPORT_STAGE(MOD_SRP, "Canceling DM Query on hca %d port %d",
port->hca->hca_index + 1, port->local_port);
@@ -323,12 +321,12 @@
* flag
*/
if ((*query_entry)->id == id) {
- return (TS_SUCCESS);
+ return 0;
}
}
*query_entry = NULL;
- return (TS_FAIL);
+ return -1;
}
int srp_find_query(srp_host_port_params_t *port, u8 *gid)
@@ -348,16 +346,16 @@
*/
if ((query->port == port) &&
(memcmp(query->remote_gid, gid, sizeof(tTS_IB_GID)) == 0)) {
- query->need_retry = TRUE;
+ query->need_retry = 1;
spin_unlock_irqrestore(&driver_params.spin_lock,
cpu_flags);
- return (TS_SUCCESS);
+ return 0;
}
}
spin_unlock_irqrestore(&driver_params.spin_lock, cpu_flags);
- return (TS_FAIL);
+ return -1;
}
void srp_update_ioc(ioc_entry_t * ioc_entry,
@@ -383,7 +381,7 @@
memcpy(svc_path_record->sgid, port->local_gid, sizeof(tTS_IB_GID));
ioc_entry->path_valid[port->hca->hca_index][port->local_port - 1] =
- TRUE;
+ 1;
TS_REPORT_STAGE(MOD_SRPTP, "Updating IOC on hca %d port %d",
port->hca->hca_index + 1, port->local_port);
@@ -450,7 +448,7 @@
* update the pathing information for the IOC
*/
status = srp_find_ioc(io_svc->controller_guid, &ioc_index);
- if (status == TS_FAIL) {
+ if (status) {
TS_REPORT_STAGE(MOD_SRPTP,
"IOC not found %llx, creating new "
"IOC entry",
@@ -460,7 +458,7 @@
status =
srp_new_ioc(io_svc->controller_guid, &ioc_index);
- if (status == TS_FAIL) {
+ if (status) {
TS_REPORT_STAGE(MOD_SRPTP,
"IOC entry creation failed, "
"too many IOCs");
@@ -525,7 +523,7 @@
* If target wasn't previously discovered
* Allocate packets and mark as in-use.
*/
- if (empty_target->valid == FALSE) {
+ if (!empty_target->valid) {
int status;
status = srp_host_alloc_pkts(empty_target);
@@ -534,15 +532,15 @@
"Could not allocat target %d",
empty_target->target_index);
} else {
- empty_target->valid = TRUE;
+ empty_target->valid = 1;
}
}
/*
* Indicate which IOCs the target/service is visible on
*/
- empty_target->ioc_mask[ioc_index] = TRUE;
- empty_target->ioc_needs_request[ioc_index] = TRUE;
+ empty_target->ioc_mask[ioc_index] = 1;
+ empty_target->ioc_needs_request[ioc_index] = 1;
}
}
@@ -556,7 +554,7 @@
* If we are shuting down, throw away the query
*/
if (driver_params.dm_shutdown) {
- if ((status == TS_SUCCESS) && (io_list)) {
+ if (!status && io_list) {
ib_host_io_list_free(io_list);
}
return;
@@ -564,15 +562,15 @@
down(&driver_params.sema);
- if ((status == TS_SUCCESS) && (io_list == NULL)) {
+ if (!status && !io_list) {
TS_REPORT_STAGE(MOD_SRPTP,
"DM Client Query complete hca %d port %d",
port->hca->hca_index + 1, port->local_port);
- port->dm_query_in_progress = FALSE;
- port->dm_need_query = FALSE;
+ port->dm_query_in_progress = 0;
+ port->dm_need_query = 0;
- if (port->dm_need_retry == TRUE) {
+ if (port->dm_need_retry) {
if (port->dm_retry_count++ < MAX_DM_RETRIES) {
TS_REPORT_WARN(MOD_SRPTP,
@@ -599,7 +597,7 @@
TS_REPORT_WARN(MOD_SRPTP, "DM Client timeout on hca %d port %d",
port->hca->hca_index + 1, port->local_port);
- port->dm_need_retry = TRUE;
+ port->dm_need_retry = 1;
} else if (status) {
/*
@@ -630,7 +628,7 @@
* If we are shuting down, throw away the query
*/
if (driver_params.dm_shutdown) {
- if ((status == TS_SUCCESS) && (io_list)) {
+ if (!status && io_list) {
ib_host_io_list_free(io_list);
}
return;
@@ -649,16 +647,16 @@
port = query_entry->port;
- if ((status == TS_SUCCESS) && (io_list == NULL)) {
+ if (!status && !io_list) {
TS_REPORT_STAGE(MOD_SRPTP,
"Port Query %d complete hca %d port %d",
query_entry->id, port->hca->hca_index + 1,
port->local_port);
- if (query_entry->need_retry == TRUE) {
- query_entry->need_retry = FALSE;
+ if (query_entry->need_retry) {
+ query_entry->need_retry = 0;
- driver_params.port_query = TRUE;
+ driver_params.port_query = 1;
ib_host_io_port_query(query_entry->port->hca->ca_hndl,
query_entry->port->local_port,
@@ -678,13 +676,13 @@
port->local_port);
if (++query_entry->retry < MAX_QUERY_RETRIES) {
- query_entry->need_retry = TRUE;
+ query_entry->need_retry = 1;
} else {
TS_REPORT_WARN(MOD_SRPTP,
"Retries exceeded on hca %d port %d",
port->hca->hca_index + 1,
port->local_port);
- query_entry->need_retry = FALSE;
+ query_entry->need_retry = 0;
}
} else if (status) {
/*
@@ -784,11 +782,11 @@
sizeof(tTS_IB_GID)) == 0) &&
(srp_path_records[index].slid == port->slid)) {
*path_record = &srp_path_records[index];
- return (TS_SUCCESS);
+ return 0;
}
}
- return (TS_FAIL);
+ return -1;
}
void srp_update_cache(struct ib_path_record *path_record,
@@ -824,7 +822,7 @@
status = srp_find_path_record(find_gid, port, &path_record);
- if (status == TS_SUCCESS) {
+ if (!status) {
TS_REPORT_STAGE(MOD_SRPTP, "Found Path Record in cache");
completion_function(TS_IB_CLIENT_QUERY_TID_INVALID,
@@ -924,7 +922,7 @@
query_entry->state = QUERY_PORT_INFO;
- driver_params.port_query = TRUE;
+ driver_params.port_query = 1;
ib_host_io_port_query(port->hca->ca_hndl,
port->local_port,
@@ -980,7 +978,7 @@
/*
* Query already outstanding, do nothing
*/
- if (srp_find_query(port, notified_port_gid) == TS_SUCCESS) {
+ if (!srp_find_query(port, notified_port_gid)) {
up(&driver_params.sema);
return;
}
@@ -1068,7 +1066,7 @@
down(&driver_params.sema);
- if (srp_find_query(srp_port, notified_port_gid) == TS_SUCCESS) {
+ if (!srp_find_query(srp_port, notified_port_gid)) {
up(&driver_params.sema);
return;
}
@@ -1214,7 +1212,7 @@
port->hca->hca_index + 1, status);
if (status == -ETIMEDOUT)
- srp_register_out_of_service(port, TRUE);
+ srp_register_out_of_service(port, 1);
else
TS_REPORT_WARN(MOD_SRPTP, "Unhandled error");
} else {
@@ -1244,7 +1242,7 @@
port->hca->hca_index + 1, status);
if (status == -ETIMEDOUT)
- srp_register_in_service(port, TRUE);
+ srp_register_in_service(port, 1);
else
TS_REPORT_WARN(MOD_SRPTP,
"Unhandled error for in-service "
@@ -1266,7 +1264,7 @@
tTS_IB_SA_NOTICE_HANDLER_FUNC handler;
tTS_IB_INFORM_INFO_SET_COMPLETION_FUNC completion_handler;
- if (port->valid == FALSE)
+ if (!port->valid)
return;
if (flag) {
@@ -1316,7 +1314,7 @@
tTS_IB_SA_NOTICE_HANDLER_FUNC handler;
tTS_IB_INFORM_INFO_SET_COMPLETION_FUNC completion_handler;
- if (port->valid == FALSE)
+ if (!port->valid)
return;
if (flag) {
@@ -1361,13 +1359,13 @@
int srp_dm_query(srp_host_port_params_t * port)
{
- int status = TS_FAIL;
+ int status = -1;
if (port->port_state == IB_PORT_STATE_ACTIVE) {
- port->dm_query_in_progress = TRUE;
+ port->dm_query_in_progress = 1;
- port->dm_need_retry = FALSE;
+ port->dm_need_retry = 0;
TS_REPORT_STAGE(MOD_SRPTP,
"DM Query Initiated on hca %d local port %d",
@@ -1383,7 +1381,7 @@
"tsIbHostIoQuery failed status 0x%x",
status);
- port->dm_query_in_progress = FALSE;
+ port->dm_query_in_progress = 0;
}
}
@@ -1422,15 +1420,15 @@
"Port active event for hca %d port %d",
hca_index + 1, event->modifier.port);
- if (port->valid == FALSE)
+ if (!port->valid)
break;
down(&driver_params.sema);
- port->dm_need_query = TRUE;
+ port->dm_need_query = 1;
if (port->port_state != IB_PORT_ACTIVE) {
- driver_params.need_refresh = TRUE;
+ driver_params.need_refresh = 1;
}
up(&driver_params.sema);
@@ -1453,7 +1451,7 @@
for (port_index = 0;
port_index < MAX_LOCAL_PORTS_PER_HCA;
port_index++) {
- if (hca->port[port_index].valid == FALSE)
+ if (!hca->port[port_index].valid)
break;
event->event = IB_PORT_ERROR;
@@ -1477,7 +1475,7 @@
"Port error event for hca %d port %d",
hca_index + 1, event->modifier.port);
- if (port->valid == FALSE)
+ if (!port->valid)
break;
/*
@@ -1498,7 +1496,7 @@
for (ioc_index = 0; ioc_index < MAX_IOCS; ioc_index++) {
ioc_table[ioc_index].path_valid[hca->
hca_index]
- [port->local_port - 1] = FALSE;
+ [port->local_port - 1] = 0;
}
spin_unlock_irqrestore(&driver_params.spin_lock,
cpu_flags);
@@ -1547,9 +1545,9 @@
up(&target->sema);
}
- srp_register_out_of_service(port, FALSE);
+ srp_register_out_of_service(port, 0);
port->out_of_service_xid = 0;
- srp_register_in_service(port, FALSE);
+ srp_register_in_service(port, 0);
port->in_service_xid = 0;
srp_port_query_cancel(port);
@@ -1605,7 +1603,7 @@
for (hca_index = 0; hca_index < MAX_HCAS; hca_index++) {
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
TS_REPORT_STAGE(MOD_SRPTP,
@@ -1656,7 +1654,7 @@
for (hca_index = 0; hca_index < MAX_HCAS; hca_index++) {
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
TS_REPORT_STAGE(MOD_SRPTP,
Index: src/linux-kernel/infiniband/ulp/srp/srp_host.h
===================================================================
--- src/linux-kernel/infiniband/ulp/srp/srp_host.h (revision 619)
+++ src/linux-kernel/infiniband/ulp/srp/srp_host.h (working copy)
@@ -50,7 +50,6 @@
#include <linux/pci.h>
#include <scsi.h>
#include <scsi/scsi_host.h>
-#include "ib_legacy_types.h"
#include "ts_kernel_trace.h"
#include "ts_kernel_thread.h"
#include <ib_verbs.h>
Index: src/linux-kernel/infiniband/ulp/srp/srptp.c
===================================================================
--- src/linux-kernel/infiniband/ulp/srp/srptp.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/srp/srptp.c (working copy)
@@ -30,7 +30,6 @@
#include <asm/atomic.h>
#include <asm/uaccess.h>
-#include "ib_legacy_types.h"
#include "ts_kernel_trace.h"
#include <ib_verbs.h>
#include "srp_cmd.h"
@@ -156,7 +155,7 @@
rcv_param.scatter_list = &srp_pkt->scatter_gather_list;
rcv_param.num_scatter_entries = 1;
rcv_param.device_specific = NULL;
- rcv_param.signaled = TRUE;
+ rcv_param.signaled = 1;
status = ib_receive(srp_pkt->conn->qp_hndl, &rcv_param, 1);
@@ -182,7 +181,7 @@
send_param.op = IB_OP_SEND;
send_param.gather_list = &srp_pkt->scatter_gather_list;
send_param.num_gather_entries = 1;
- send_param.signaled = TRUE;
+ send_param.signaled = 1;
status = ib_send(srp_pkt->conn->qp_hndl, &send_param, 1);
@@ -252,14 +251,14 @@
goto cleanup;
}
- hca->valid = TRUE;
+ hca->valid = 1;
hca->hca_index = hca_index;
for (port_index = 0; port_index < MAX_LOCAL_PORTS_PER_HCA;
port_index++) {
/*
* Apply IB ports mask here
*/
- hca->port[port_index].valid = TRUE;
+ hca->port[port_index].valid = 1;
hca->port[port_index].hca = hca;
hca->port[port_index].local_port = port_index + 1;
hca->port[port_index].index =
@@ -320,7 +319,7 @@
fmr_params.pool_size = 64 * max_cmds_per_lun * sg_elements;
fmr_params.dirty_watermark = fmr_params.pool_size / 8;
- fmr_params.cache = FALSE;
+ fmr_params.cache = 0;
TS_REPORT_STAGE(MOD_SRPTP,
"Pool Create max pages 0x%x pool size 0x%x",
@@ -392,7 +391,7 @@
hca = &hca_params[i];
- if (hca_params[i].valid == FALSE)
+ if (!hca_params[i].valid)
continue;
status = ib_fmr_pool_destroy(hca->fmr_pool);
@@ -534,7 +533,7 @@
TS_REPORT_WARN(MOD_SRPTP, "Unknown comm_id 0x%x for target %d",
comm_id, target->target_index);
up(&target->sema);
- return (TS_SUCCESS);
+ return 0;
}
TS_REPORT_STAGE(MOD_SRPTP, "SRP conn event %d for comm id 0x%x",
@@ -575,7 +574,7 @@
up(&target->sema);
- return (TS_SUCCESS);
+ return 0;
}
/*
@@ -599,7 +598,7 @@
hca = &hca_params[hca_index];
- if (hca->valid == FALSE)
+ if (!hca->valid)
break;
for (port_index = 0; port_index < MAX_LOCAL_PORTS_PER_HCA;
@@ -726,7 +725,7 @@
active_param.rnr_retry_count = 3;
active_param.cm_response_timeout = 19;
active_param.max_cm_retries = 3;
- active_param.flow_control = TRUE;
+ active_param.flow_control = 1;
path_record->packet_life = 14;
path_record->mtu = IB_MTU_1024;
@@ -735,7 +734,7 @@
path_record, /* Primary Path */
NULL, /* alternate path */
SRP_SERVICE_ID, /* Service ID */
- FALSE, /* peer-to-peer */
+ 0, /* peer-to-peer */
conn_handler, /* Callback function */
(void *)conn->target, /* Argument */
&conn->comm_id); /* comm_id */
@@ -750,7 +749,7 @@
return (status);
}
- return (TS_SUCCESS);
+ return 0;
}
/*
Index: src/linux-kernel/infiniband/ulp/srp/hostoptions.c
===================================================================
--- src/linux-kernel/infiniband/ulp/srp/hostoptions.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/srp/hostoptions.c (working copy)
@@ -46,7 +46,6 @@
#include <scsi.h>
#include <scsi/scsi_host.h>
-#include "ib_legacy_types.h"
#include "srp_cmd.h"
#include "srptp.h"
#include "srp_host.h"
@@ -64,8 +63,6 @@
#define kLoadError 1
#define kNoError 0
-#define TS_FAILURE -1
-#define TS_SUCCESS 0
void ConvertToLowerCase(char *stringPtr)
{
@@ -239,7 +236,7 @@
/* printk( "wwn string %s\n", wwn_str ); */
/* printk( "characters copied %d\n", (u32)chars_copied ); */
if (chars_copied > (kWWNStringLength + 1)) {
- return (TS_FAILURE);
+ return -1;
} else {
curr_loc += chars_copied;
}
@@ -249,7 +246,7 @@
*(u64 *) & (srp_targets[i].service_name) = cpu_to_be64(wwn);
if (result != kNoError)
- return (TS_FAILURE);
+ return -1;
delimeter = ':';
memset(guid_str, 0, kGUIDStringLength + 1);
@@ -264,7 +261,7 @@
(u32) chars_copied);
if (chars_copied > (kGUIDStringLength + 1)) {
- return (TS_FAILURE);
+ return -1;
} else {
curr_loc += chars_copied;
}
@@ -276,7 +273,7 @@
*(u64 *) & srp_targets[i].guid);
if (result != kNoError)
- return (TS_FAILURE);
+ return -1;
} else {
GetString(curr_loc, dlid_str, kDLIDStringLength,
delimeter, &chars_copied);
@@ -285,7 +282,7 @@
/* printk( "characters copied %d\n", (u32)chars_copied ); */
if (chars_copied > (kDLIDStringLength + 1)) {
- return (TS_FAILURE);
+ return -1;
} else {
curr_loc += chars_copied;
}
@@ -296,13 +293,13 @@
srp_targets[i].iou_path_record[0].dlid);
if (result != kNoError)
- return (TS_FAILURE);
+ return -1;
}
i++;
}
- return (TS_SUCCESS);
+ return 0;
}
#endif
@@ -338,7 +335,7 @@
&chars_copied);
if (chars_copied > (kWWNStringLength + 1)) {
- return (TS_FAILURE);
+ return -1;
} else {
curr_loc += chars_copied;
}
@@ -346,7 +343,7 @@
result = StringToHex64(wwn_str, &wwn);
if (result != kNoError)
- return (TS_FAILURE);
+ return -1;
delimeter = ':';
memset(target_index_str, 0, kTargetIndexStringLength + 1);
@@ -355,7 +352,7 @@
delimeter, &chars_copied);
if (chars_copied > (kTargetIndexStringLength + 1)) {
- return (TS_FAILURE);
+ return -1;
} else {
curr_loc += chars_copied;
}
@@ -369,33 +366,33 @@
TS_REPORT_FATAL(MOD_SRPTP,
"Target %d, packet allocation failure");
}
- target->valid = TRUE;
+ target->valid = 1;
if (result != kNoError)
- return (TS_FAILURE);
+ return -1;
i++;
}
- return (TS_SUCCESS);
+ return 0;
}
void print_target_bindings(void)
{
srp_target_t *target;
- int not_first_entry = FALSE;
+ int not_first_entry = 0;
printk("srp_host: target_bindings=");
for (target = &srp_targets[0];
(target < &srp_targets[max_srp_targets]); target++) {
- if (target->valid == TRUE) {
+ if (target->valid) {
/* don't print colon on first guy */
- if (not_first_entry == TRUE) {
+ if (not_first_entry) {
printk(":");
} else {
- not_first_entry = TRUE;
+ not_first_entry = 1;
}
printk("%llx.%x",
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_send.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_send.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_send.c (working copy)
@@ -1691,8 +1691,8 @@
/*.._sdp_send_ctrl_buff -- Create and Send a buffered control message. */
static s32 _sdp_send_ctrl_buff(struct sdp_opt *conn,
u8 mid,
- tBOOLEAN se,
- tBOOLEAN sig)
+ int se,
+ int sig)
{
s32 result = 0;
struct sdpc_buff *buff;
@@ -1721,7 +1721,7 @@
/*
* solicite event flag for IB sends.
*/
- if (TRUE == se) {
+ if (se) {
TS_SDP_BUFF_F_SET_SE(buff);
}
@@ -1732,7 +1732,7 @@
/*
* try for unsignalled?
*/
- if (TRUE == sig) {
+ if (sig) {
TS_SDP_BUFF_F_CLR_UNSIG(buff);
}
@@ -1864,36 +1864,35 @@
return 0;
}
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_DATA, FALSE, FALSE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_DATA, 0, 0);
} /* sdp_send_ctrl_ack */
/* ========================================================================= */
/*..sdp_send_ctrl_send_sm -- Send a request for buffered mode. */
s32 sdp_send_ctrl_send_sm(struct sdp_opt *conn)
{
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SEND_SM, TRUE, TRUE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SEND_SM, 1, 1);
} /* sdp_send_ctrl_send_sm */
/* ========================================================================= */
/*..sdp_send_ctrl_src_cancel -- Send a source cancel */
s32 sdp_send_ctrl_src_cancel(struct sdp_opt *conn)
{
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SRC_CANCEL, TRUE, TRUE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SRC_CANCEL, 1, 1);
} /* sdp_send_ctrl_src_cancel */
/* ========================================================================= */
/*..sdp_send_ctrl_snk_cancel -- Send a sink cancel */
s32 sdp_send_ctrl_snk_cancel(struct sdp_opt *conn)
{
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SNK_CANCEL, TRUE, TRUE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SNK_CANCEL, 1, 1);
} /* sdp_send_ctrl_snk_cancel */
/* ========================================================================= */
/*..sdp_send_ctrl_snk_cancel_ack -- Send an ack for a sink cancel */
s32 sdp_send_ctrl_snk_cancel_ack(struct sdp_opt *conn)
{
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SNK_CANCEL_ACK, TRUE,
- TRUE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_SNK_CANCEL_ACK, 1, 1);
} /* sdp_send_ctrl_snk_cancel_ack */
/* ========================================================================= */
@@ -1904,7 +1903,7 @@
/*
* send
*/
- return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_ABORT_CONN, TRUE, TRUE);
+ return _sdp_send_ctrl_buff(conn, TS_SDP_MSG_MID_ABORT_CONN, 1, 1);
} /* sdp_send_ctrl_abort */
/* ========================================================================= */
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_iocb.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_iocb.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_iocb.c (working copy)
@@ -338,7 +338,7 @@
/* ========================================================================= */
/*.._sdp_iocb_q_get - get, and remove, the object at the tables head */
-static struct sdpc_iocb *_sdp_iocb_q_get(struct sdpc_iocb_q *table, tBOOLEAN head)
+static struct sdpc_iocb *_sdp_iocb_q_get(struct sdpc_iocb_q *table, int head)
{
struct sdpc_iocb *iocb;
struct sdpc_iocb *next;
@@ -351,7 +351,7 @@
return NULL;
}
- if (TRUE == head) {
+ if (head) {
iocb = table->head;
}
@@ -387,7 +387,7 @@
/*.._sdp_iocb_q_put - put the IOCB object at the tables tail */
int _sdp_iocb_q_put(struct sdpc_iocb_q *table,
struct sdpc_iocb *iocb,
- tBOOLEAN head)
+ int head)
{
struct sdpc_iocb *next;
struct sdpc_iocb *prev;
@@ -411,7 +411,7 @@
iocb->next = next;
next->prev = iocb;
- if (TRUE == head) {
+ if (head) {
table->head = iocb;
}
}
@@ -427,28 +427,28 @@
/*..sdp_iocb_q_get_tail - get an IOCB object from the tables tail */
struct sdpc_iocb *sdp_iocb_q_get_tail(struct sdpc_iocb_q *table)
{
- return _sdp_iocb_q_get(table, FALSE);
+ return _sdp_iocb_q_get(table, 0);
} /* sdp_iocb_q_get_tail */
/* ========================================================================= */
/*..sdp_iocb_q_get_head - get an IOCB object from the tables head */
struct sdpc_iocb *sdp_iocb_q_get_head(struct sdpc_iocb_q *table)
{
- return _sdp_iocb_q_get(table, TRUE);
+ return _sdp_iocb_q_get(table, 1);
} /* sdp_iocb_q_get_head */
/* ========================================================================= */
/*..sdp_iocb_q_put_tail - put the IOCB object at the tables tail */
int sdp_iocb_q_put_tail(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
{
- return _sdp_iocb_q_put(table, iocb, FALSE);
+ return _sdp_iocb_q_put(table, iocb, 0);
} /* sdp_iocb_q_put_tail */
/* ========================================================================= */
/*..sdp_iocb_q_put_head - put the IOCB object at the tables head */
int sdp_iocb_q_put_head(struct sdpc_iocb_q *table, struct sdpc_iocb *iocb)
{
- return _sdp_iocb_q_put(table, iocb, TRUE);
+ return _sdp_iocb_q_put(table, iocb, 1);
} /* sdp_iocb_q_put_head */
/* ========================================================================= */
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_buff.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_buff.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_buff.c (working copy)
@@ -44,7 +44,7 @@
return NULL;
}
- if (TRUE == fifo) {
+ if (fifo) {
buff = pool->head;
}
@@ -110,7 +110,7 @@
buff->next->prev = buff;
buff->prev->next = buff;
- if (TRUE == fifo) {
+ if (fifo) {
pool->head = buff;
}
}
@@ -128,7 +128,7 @@
{
TS_CHECK_NULL(pool, NULL);
- if (NULL == pool->head || TRUE == fifo) {
+ if (NULL == pool->head || fifo) {
return pool->head;
}
@@ -243,7 +243,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_get(pool, TRUE, NULL, NULL);
+ buff = _sdp_buff_q_get(pool, 1, NULL, NULL);
return buff;
} /* sdp_buff_q_get */
@@ -254,7 +254,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_get(pool, TRUE, NULL, NULL);
+ buff = _sdp_buff_q_get(pool, 1, NULL, NULL);
return buff;
} /* sdp_buff_q_get_head */
@@ -265,7 +265,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_get(pool, FALSE, NULL, NULL);
+ buff = _sdp_buff_q_get(pool, 0, NULL, NULL);
return buff;
} /* sdp_buff_q_get_tail */
@@ -276,7 +276,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_look(pool, TRUE);
+ buff = _sdp_buff_q_look(pool, 1);
return buff;
} /* sdp_buff_q_look_head */
@@ -287,7 +287,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_look(pool, FALSE);
+ buff = _sdp_buff_q_look(pool, 0);
return buff;
} /* sdp_buff_q_look_tail */
@@ -300,7 +300,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_get(pool, TRUE, test_func, usr_arg);
+ buff = _sdp_buff_q_get(pool, 1, test_func, usr_arg);
return buff;
} /* sdp_buff_q_fetch_head */
@@ -313,7 +313,7 @@
{
struct sdpc_buff *buff;
- buff = _sdp_buff_q_get(pool, FALSE, test_func, usr_arg);
+ buff = _sdp_buff_q_get(pool, 0, test_func, usr_arg);
return buff;
} /* sdp_buff_q_fetch_tail */
@@ -432,7 +432,7 @@
{
int result;
- result = _sdp_buff_q_put(pool, buff, TRUE);
+ result = _sdp_buff_q_put(pool, buff, 1);
return result;
} /* sdp_buff_q_put */
@@ -444,7 +444,7 @@
{
int result;
- result = _sdp_buff_q_put(pool, buff, TRUE);
+ result = _sdp_buff_q_put(pool, buff, 1);
return result;
} /* sdp_buff_q_put_head */
@@ -456,7 +456,7 @@
{
int result;
- result = _sdp_buff_q_put(pool, buff, FALSE);
+ result = _sdp_buff_q_put(pool, buff, 0);
return result;
} /* sdp_buff_q_put_tail */
@@ -470,7 +470,7 @@
TS_CHECK_NULL(pool, -EINVAL);
- while (NULL != (buff = _sdp_buff_q_get(pool, FALSE, NULL, NULL))) {
+ while (NULL != (buff = _sdp_buff_q_get(pool, 0, NULL, NULL))) {
result = sdp_buff_pool_put(buff);
if (0 > result) {
Index: src/linux-kernel/infiniband/ulp/sdp/sdp_queue.c
===================================================================
--- src/linux-kernel/infiniband/ulp/sdp/sdp_queue.c (revision 619)
+++ src/linux-kernel/infiniband/ulp/sdp/sdp_queue.c (working copy)
@@ -33,7 +33,7 @@
/* ========================================================================= */
/*.._sdp_desc_q_get - Get an element from a specific table */
static struct sdpc_desc *_sdp_desc_q_get(struct sdpc_desc_q *table,
- tBOOLEAN fifo)
+ int fifo)
{
struct sdpc_desc *element;
@@ -44,7 +44,7 @@
return NULL;
}
- if (TRUE == fifo) {
+ if (fifo) {
element = table->head;
}
@@ -80,7 +80,7 @@
/*.._sdp_desc_q_put - Place an element into a specific table */
static __inline__ int _sdp_desc_q_put(struct sdpc_desc_q *table,
struct sdpc_desc *element,
- tBOOLEAN fifo)
+ int fifo)
{
/*
* fifo: false == tail, true == head
@@ -107,7 +107,7 @@
element->next->prev = element;
element->prev->next = element;
- if (TRUE == fifo) {
+ if (fifo) {
table->head = element;
}
}
@@ -212,14 +212,14 @@
/*..sdp_desc_q_get_head - Get the element at the front of the table */
struct sdpc_desc *sdp_desc_q_get_head(struct sdpc_desc_q *table)
{
- return _sdp_desc_q_get(table, TRUE);
+ return _sdp_desc_q_get(table, 1);
} /* sdp_desc_q_get_head */
/* ========================================================================= */
/*..sdp_desc_q_get_tail - Get the element at the end of the table */
struct sdpc_desc *sdp_desc_q_get_tail(struct sdpc_desc_q *table)
{
- return _sdp_desc_q_get(table, FALSE);
+ return _sdp_desc_q_get(table, 0);
} /* sdp_desc_q_get_tail */
/* ========================================================================= */
@@ -227,7 +227,7 @@
int sdp_desc_q_put_head(struct sdpc_desc_q *table,
struct sdpc_desc *element)
{
- return _sdp_desc_q_put(table, element, TRUE);
+ return _sdp_desc_q_put(table, element, 1);
} /* sdp_desc_q_put_head */
/* ========================================================================= */
@@ -235,7 +235,7 @@
int sdp_desc_q_put_tail(struct sdpc_desc_q *table,
struct sdpc_desc *element)
{
- return _sdp_desc_q_put(table, element, FALSE);
+ return _sdp_desc_q_put(table, element, 0);
} /* sdp_desc_q_put_tail */
/* ========================================================================= */
Index: src/linux-kernel/infiniband/include/ib_legacy_types.h
===================================================================
--- src/linux-kernel/infiniband/include/ib_legacy_types.h (revision 576)
+++ src/linux-kernel/infiniband/include/ib_legacy_types.h (working copy)
@@ -1,58 +0,0 @@
-/*
- This software is available to you under a choice of one of two
- licenses. You may choose to be licensed under the terms of the GNU
- General Public License (GPL) Version 2, available at
- <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
- license, available in the LICENSE.TXT file accompanying this
- software. These details are also available at
- <http://openib.org/license.html>.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- Copyright (c) 2004 Topspin Communications. All rights reserved.
-
- $Id$
-*/
-
-#ifndef _IB_LEGACY_TYPES_H
-#define _IB_LEGACY_TYPES_H
-
-/*
- * #define section
- */
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/*
- * Common types used by all proprietary TopSpin code (native C types
- * should not be used).
- */
-typedef int tBOOLEAN;
-
-/*
- * Generic type for returning pass/fail information back from subroutines
- * Note that this is the *opposite* semantics from BOOLEAN. I.e. a zero
- * (False) indicates success. This is consistent with the VxWorks stds.
- */
-typedef enum
-{
- TS_FAIL = -1,
- TS_SUCCESS = 0 /* must be consistant with "OK" defined in */
- /* rl_rlstddef.h - RAPIDLOGIC */
-
-} tSTATUS;
-
-#endif /* _IB_LEGACY_TYPES_H */
More information about the general
mailing list