[openib-general] [PATCH] ib_mad.c: Eliminate macro use
Hal Rosenstock
halr at voltaire.com
Sat Sep 11 05:02:19 PDT 2004
ib_mad.c: Eliminate macro use
Index: ib_mad.c
===================================================================
--- ib_mad.c (revision 781)
+++ ib_mad.c (working copy)
@@ -74,28 +74,13 @@
* Locks
*/
-/* Device list lock */
+/* Port list lock */
static spinlock_t ib_mad_port_list_lock = SPIN_LOCK_UNLOCKED;
-#define IB_MAD_PORT_LIST_LOCK_VAR unsigned long ib_mad_port_list_sflags
-#define IB_MAD_PORT_LIST_LOCK() spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags)
-#define IB_MAD_PORT_LIST_UNLOCK() spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags)
/* Agent list lock */
static spinlock_t ib_mad_agent_list_lock = SPIN_LOCK_UNLOCKED;
-#define IB_MAD_AGENT_LIST_LOCK_VAR unsigned long ib_mad_agent_list_sflags
-#define IB_MAD_AGENT_LIST_LOCK() spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags)
-#define IB_MAD_AGENT_LIST_UNLOCK() spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags)
-/* Send and receive list locks */
-#define IB_MAD_SEND_LIST_LOCK_VAR unsigned long ib_mad_send_list_sflags
-#define IB_MAD_SEND_LIST_LOCK(priv) spin_lock_irqsave(&priv->send_list_lock, ib_mad_send_list_sflags)
-#define IB_MAD_SEND_LIST_UNLOCK(priv) spin_unlock_irqrestore(&priv->send_list_lock, ib_mad_send_list_sflags)
-#define IB_MAD_RECV_LIST_LOCK_VAR unsigned long ib_mad_recv_list_sflags
-#define IB_MAD_RECV_LIST_LOCK(priv) spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags)
-#define IB_MAD_RECV_LIST_UNLOCK(priv) spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags)
-
-
/* Forward declarations */
static u8 convert_mgmt_class(struct ib_mad_reg_req *mad_reg_req);
static int is_method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -109,7 +94,7 @@
/*
- * ib_mad_reg - Register to send/receive MADs.
+ * ib_mad_reg - Register to send/receive MADs
*/
struct ib_mad_agent *ib_mad_reg(struct ib_device *device,
u8 port,
@@ -130,8 +115,8 @@
struct ib_mad_mgmt_class_table *class;
struct ib_mad_mgmt_method_table *method;
int ret2;
- IB_MAD_PORT_LIST_LOCK_VAR;
- IB_MAD_AGENT_LIST_LOCK_VAR;
+ unsigned long ib_mad_port_list_sflags;
+ unsigned long ib_mad_agent_list_sflags;
u8 mgmt_class;
/* Validate parameters */
@@ -170,14 +155,14 @@
}
/* Validate device and port */
- IB_MAD_PORT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
list_for_each(entry, head) {
if (entry->device == device && entry->port == port) {
priv = entry;
break;
}
}
- IB_MAD_PORT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
if (!priv) {
ret = ERR_PTR(-ENODEV);
goto error1;
@@ -233,9 +218,9 @@
mad_agent->hi_tid = ++ib_mad_client_id;
/* Add to mad agent list */
- IB_MAD_AGENT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
list_add_tail((struct list_head *) mad_agent_priv,
&ib_mad_agent_list);
- IB_MAD_AGENT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
ret2 = add_mad_reg_req(mad_reg_req, mad_agent_priv);
if (ret2) {
@@ -247,14 +232,14 @@
error3:
/* Remove from mad agent list */
- IB_MAD_AGENT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
list_for_each(entry2, head2) {
if (entry2->agent == mad_agent_priv->agent) {
list_del((struct list_head *)entry2);
break;
}
}
- IB_MAD_AGENT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
kfree(reg_req);
error2:
kfree(mad_agent);
@@ -265,15 +250,15 @@
EXPORT_SYMBOL(ib_mad_reg);
/*
- * ib_mad_dereg - Deregisters a client from using MAD services.
+ * ib_mad_dereg - Deregisters a client from using MAD services
*/
int ib_mad_dereg(struct ib_mad_agent *mad_agent)
{
struct ib_mad_agent_private *entry,
*head = (struct ib_mad_agent_private *)&ib_mad_agent_list;
- IB_MAD_AGENT_LIST_LOCK_VAR;
+ unsigned long ib_mad_agent_list_sflags;
- IB_MAD_AGENT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
list_for_each(entry, head) {
if (entry->agent == mad_agent) {
remove_mad_reg_req(entry);
@@ -285,7 +270,7 @@
break;
}
}
- IB_MAD_AGENT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_agent_list_lock, ib_mad_agent_list_sflags);
return 0;
}
@@ -293,7 +278,7 @@
/*
* ib_mad_post_send - Posts MAD(s) to the send queue of the QP associated
- * with the registered client.
+ * with the registered client
*/
int ib_mad_post_send(struct ib_mad_agent *mad_agent,
struct ib_send_wr *send_wr,
@@ -304,7 +289,7 @@
struct ib_send_wr wr;
struct ib_send_wr *bad_wr;
struct ib_mad_send_wr_private *mad_send_wr;
- IB_MAD_SEND_LIST_LOCK_VAR;
+ unsigned long ib_mad_send_list_sflags;
cur_send_wr = send_wr;
/* Validate supplied parameters */
@@ -343,17 +328,17 @@
wr.send_flags = IB_SEND_SIGNALED; /* cur_send_wr->send_flags ? */
/* Link send WR into posted send MAD list */
- IB_MAD_SEND_LIST_LOCK(((struct ib_mad_port_private *)mad_agent->device->mad));
+ spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
list_add_tail((struct list_head *)mad_send_wr,
&((struct ib_mad_port_private *)mad_agent->device->mad)->send_posted_mad_list);
- IB_MAD_SEND_LIST_UNLOCK(((struct ib_mad_port_private *)mad_agent->device->mad));
+ spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
ret = ib_post_send(mad_agent->qp, &wr, &bad_wr);
if (ret) {
/* Unlink from posted send MAD list */
- IB_MAD_SEND_LIST_LOCK(((struct ib_mad_port_private *)mad_agent->device->mad));
+ spin_lock_irqsave(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
list_del((struct list_head *)send_wr);
- IB_MAD_SEND_LIST_UNLOCK(((struct ib_mad_port_private *)mad_agent->device->mad));
+ spin_unlock_irqrestore(&((struct ib_mad_port_private *)mad_agent->device->mad)->send_list_lock, ib_mad_send_list_sflags);
*bad_send_wr = cur_send_wr;
printk(KERN_NOTICE "ib_mad_post_send failed\n");
return ret;
@@ -562,10 +547,10 @@
struct ib_mad_private_header *entry,
*head = (struct ib_mad_private_header *)&priv->recv_posted_mad_list;
struct ib_mad_private *recv = NULL;
- IB_MAD_RECV_LIST_LOCK_VAR;
+ unsigned long ib_mad_recv_list_sflags;
/* Find entry on posted MAD receive list which corresponds to this completion */
- IB_MAD_RECV_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
list_for_each(entry, head) {
if ((unsigned long)entry == wc->wr_id) {
recv = (struct ib_mad_private *)entry;
@@ -574,7 +559,7 @@
break;
}
}
- IB_MAD_RECV_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
if (!recv) {
printk(KERN_ERR "No matching posted receive WR 0x%Lx\n", wc->wr_id);
}
@@ -610,10 +595,10 @@
{
struct ib_mad_send_wr_private *entry, *send_wr = NULL,
*head = (struct ib_mad_send_wr_private *)&priv->send_posted_mad_list;
- IB_MAD_SEND_LIST_LOCK_VAR;
+ unsigned long ib_mad_send_list_sflags;
/* Find entry on posted MAD send list which corresponds to this completion */
- IB_MAD_SEND_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->send_list_lock, ib_mad_send_list_sflags);
list_for_each(entry, head) {
if (entry->wr_id == wc->wr_id) {
send_wr = entry;
@@ -622,7 +607,7 @@
break;
}
}
- IB_MAD_SEND_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->send_list_lock, ib_mad_send_list_sflags);
if (!send_wr) {
printk(KERN_ERR "No matching posted send WR 0x%Lx\n", wc->wr_id);
} else {
@@ -700,7 +685,6 @@
daemonize("ib_mad-%-6s-%-2d", priv->device->name, priv->port);
unlock_kernel();
- sema_init(&thread_data->sem, 0);
while (1) {
if (down_interruptible(&thread_data->sem)) {
printk(KERN_DEBUG "Exiting ib_mad thread\n");
@@ -723,6 +707,7 @@
{
struct ib_mad_thread_data *thread_data = &priv->thread_data;
+ sema_init(&thread_data->sem, 0);
thread_data->run = 1;
kernel_thread(ib_mad_thread, priv, 0);
}
@@ -761,7 +746,7 @@
struct ib_sge sg_list;
struct ib_recv_wr recv_wr;
struct ib_recv_wr *bad_recv_wr;
- IB_MAD_RECV_LIST_LOCK_VAR;
+ unsigned long ib_mad_recv_list_sflags;
/* Allocate memory for receive MAD (and private header) */
mad_priv = kmalloc(sizeof *mad_priv, GFP_KERNEL);
@@ -787,18 +772,18 @@
recv_wr.wr_id = (unsigned long)mad_priv;
/* Link receive WR into posted receive MAD list */
- IB_MAD_RECV_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
list_add_tail((struct list_head *)mad_priv,
&priv->recv_posted_mad_list);
- IB_MAD_RECV_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
pci_unmap_addr_set(&mad_priv->header.buf, mapping, sg_list.addr);
/* Now, post receive WR */
if (ib_post_recv(qp, &recv_wr, &bad_recv_wr)) {
/* Unlink from posted receive MAD list */
- IB_MAD_RECV_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
list_del((struct list_head *)mad_priv);
- IB_MAD_RECV_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
pci_unmap_single(priv->device->dma_device,
pci_unmap_addr(&mad_priv->header.buf, mapping),
@@ -835,16 +820,16 @@
*/
static void ib_mad_return_posted_recv_mads(struct ib_mad_port_private *priv)
{
- IB_MAD_RECV_LIST_LOCK_VAR;
+ unsigned long ib_mad_recv_list_sflags;
/* PCI mapping ? */
- IB_MAD_RECV_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->recv_list_lock, ib_mad_recv_list_sflags);
while (!list_empty(&priv->recv_posted_mad_list)) {
}
INIT_LIST_HEAD(&priv->recv_posted_mad_list);
- IB_MAD_RECV_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->recv_list_lock, ib_mad_recv_list_sflags);
}
/*
@@ -852,17 +837,17 @@
*/
static void ib_mad_return_posted_send_mads(struct ib_mad_port_private *priv)
{
- IB_MAD_SEND_LIST_LOCK_VAR;
+ unsigned long ib_mad_send_list_sflags;
/* PCI mapping ? */
- IB_MAD_SEND_LIST_LOCK(priv);
+ spin_lock_irqsave(&priv->send_list_lock, ib_mad_send_list_sflags);
while (!list_empty(&priv->send_posted_mad_list)) {
list_del(priv->send_posted_mad_list.next);
/* Call completion handler ? */
}
INIT_LIST_HEAD(&priv->send_posted_mad_list);
- IB_MAD_SEND_LIST_UNLOCK(priv);
+ spin_unlock_irqrestore(&priv->send_list_lock, ib_mad_send_list_sflags);
}
/*
@@ -981,24 +966,13 @@
return ret;
}
-#define IB_MAD_PORT_SET_UP(__port__) {\
- IB_MAD_PORT_LIST_LOCK_VAR;\
- IB_MAD_PORT_LIST_LOCK();\
- (__port__)->up = 1;\
- IB_MAD_PORT_LIST_UNLOCK();}
-
-#define IB_MAD_PORT_SET_DOWN(__port__) {\
- IB_MAD_PORT_LIST_LOCK_VAR;\
- IB_MAD_PORT_LIST_LOCK();\
- (__port__)->up = 0;\
- IB_MAD_PORT_LIST_UNLOCK();}
-
/*
* Start the port
*/
static int ib_mad_port_start(struct ib_mad_port_private *priv)
{
int ret, i;
+ unsigned long ib_mad_port_list_sflags;
for (i = 0; i < 2; i++) {
ret = ib_mad_change_qp_state_to_init(priv->qp[i], priv->port);
@@ -1034,8 +1008,9 @@
}
}
- IB_MAD_PORT_SET_UP(priv);
-
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+ priv->up = 1;
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
return 0;
error:
ib_mad_return_posted_recv_mads(priv);
@@ -1052,8 +1027,11 @@
static void ib_mad_port_stop(struct ib_mad_port_private *priv)
{
int i;
+ unsigned long ib_mad_port_list_sflags;
- IB_MAD_PORT_SET_DOWN(priv);
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
+ priv->up = 0;
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
for (i = 0; i < 2; i++) {
ib_mad_change_qp_state_to_reset(priv->qp[i]);
@@ -1096,17 +1074,17 @@
struct ib_qp_cap qp_cap;
struct ib_mad_port_private *entry, *priv = NULL,
*head = (struct ib_mad_port_private *) &ib_mad_port_list;
- IB_MAD_PORT_LIST_LOCK_VAR;
+ unsigned long ib_mad_port_list_sflags;
/* First, check if port already open at MAD layer */
- IB_MAD_PORT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
list_for_each(entry, head) {
if (entry->device == device && entry->port == port) {
priv = entry;
break;
}
}
- IB_MAD_PORT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
if (priv) {
printk(KERN_DEBUG "Port already open\n");
return 0;
@@ -1191,9 +1169,9 @@
goto error8;
}
- IB_MAD_PORT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
list_add_tail((struct list_head *)priv, &ib_mad_port_list);
- IB_MAD_PORT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
return 0;
@@ -1222,9 +1200,9 @@
{
struct ib_mad_port_private *entry, *priv = NULL,
*head = (struct ib_mad_port_private *)&ib_mad_port_list;
- IB_MAD_PORT_LIST_LOCK_VAR;
+ unsigned long ib_mad_port_list_sflags;
- IB_MAD_PORT_LIST_LOCK();
+ spin_lock_irqsave(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
list_for_each(entry, head) {
if (entry->device == device && entry->port == port) {
priv = entry;
@@ -1234,12 +1212,12 @@
if (priv == NULL) {
printk(KERN_ERR "Port not found\n");
- IB_MAD_PORT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
return -ENODEV;
}
list_del((struct list_head *)priv);
- IB_MAD_PORT_LIST_UNLOCK();
+ spin_unlock_irqrestore(&ib_mad_port_list_lock, ib_mad_port_list_sflags);
ib_mad_port_stop(priv);
ib_mad_thread_stop(priv);
More information about the general
mailing list