[ofa-general] [PATCH-2.6.24 2/2] [RFC] ib/cm: add basic performance counters
Sean Hefty
sean.hefty at intel.com
Tue Sep 25 11:05:14 PDT 2007
Add performance/debug counters to track sent/received messages, retries,
and duplicates. Counters are tracked per CM message type, per port.
The counters are always enabled, so intrusive state tracking is not done.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
This exports the CM counters through debugfs. The implementation of
the counters has changed to use a 2D array, but the types of counters
are the same as in the previous version of this patch.
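
With this patch, each port gets its own debugfs directory,
/sys/kernel/debug/infiniband_cm/<device>/<port>/ (assuming debugfs is
mounted in the usual place), containing the four counter files
(cm_tx_msgs, cm_tx_retries, cm_rx_msgs, cm_rx_duplicates). As a usage
sketch -- the device name and port below are made up -- each read() on
a counter file returns a single "<TYPE> <value>" line and advances the
file position, so looping until read() returns 0 dumps one line per CM
message type:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Hypothetical device "mthca0", port 1. */
	fd = open("/sys/kernel/debug/infiniband_cm/mthca0/1/cm_tx_msgs",
		  O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One line per message type: REQ, MRA, REJ, ..., APR. */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	close(fd);
	return 0;
}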
drivers/infiniband/core/cm.c | 206 ++++++++++++++++++++++++++++++++++++++++--
1 files changed, 194 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 2e39236..481b9e7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -37,6 +37,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
@@ -78,19 +79,60 @@ static struct ib_cm {
struct workqueue_struct *wq;
} cm;
+/* Counter indexes ordered by attribute ID */
+enum {
+ CM_REQ_COUNTER,
+ CM_MRA_COUNTER,
+ CM_REJ_COUNTER,
+ CM_REP_COUNTER,
+ CM_RTU_COUNTER,
+ CM_DREQ_COUNTER,
+ CM_DREP_COUNTER,
+ CM_SIDR_REQ_COUNTER,
+ CM_SIDR_REP_COUNTER,
+ CM_LAP_COUNTER,
+ CM_APR_COUNTER,
+ CM_ATTR_COUNT,
+ CM_ATTR_ID_OFFSET = 0x0010,
+};
+
+static char const attr_names[CM_ATTR_COUNT][sizeof("SIDR_REQ")] = {
+ "REQ", "MRA", "REJ", "REP", "RTU", "DREQ", "DREP",
+ "SIDR_REQ", "SIDR_REP", "LAP", "APR"
+};
+
+enum {
+ CM_XMIT,
+ CM_XMIT_RETRIES,
+ CM_RECV,
+ CM_RECV_DUPLICATES,
+ CM_COUNTERS
+};
+
+static char const counter_names[CM_COUNTERS][sizeof("cm_rx_duplicates")] = {
+ "cm_tx_msgs", "cm_tx_retries",
+ "cm_rx_msgs", "cm_rx_duplicates"
+};
+
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
+ struct dentry *port_dir;
u8 port_num;
+ atomic_long_t counters[CM_COUNTERS][CM_ATTR_COUNT];
+ struct dentry *counter_file[CM_COUNTERS];
};
struct cm_device {
struct list_head list;
struct ib_device *device;
+ struct dentry *dev_dir;
u8 ack_delay;
struct cm_port port[0];
};
+static struct dentry *cm_dir;
+
struct cm_av {
struct cm_port *port;
union ib_gid dgid;
@@ -1270,6 +1312,9 @@ static void cm_dup_req_handler(struct cm_work *work,
struct ib_mad_send_buf *msg = NULL;
int ret;
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_REQ_COUNTER]);
+
/* Quick state check to discard duplicate REQs. */
if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
return;
@@ -1616,6 +1661,8 @@ static void cm_dup_rep_handler(struct cm_work *work)
if (!cm_id_priv)
return;
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
goto deref;
@@ -1781,6 +1828,8 @@ static int cm_rtu_handler(struct cm_work *work)
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_RTU_COUNTER]);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
@@ -1958,6 +2007,8 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
dreq_msg->local_comm_id);
if (!cm_id_priv) {
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
return -EINVAL;
}
@@ -1977,6 +2028,8 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_DREQ_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
@@ -1988,6 +2041,10 @@ static int cm_dreq_handler(struct cm_work *work)
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
+ case IB_CM_DREQ_RCVD:
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_DREQ_COUNTER]);
+ goto unlock;
default:
goto unlock;
}
@@ -2339,10 +2396,19 @@ static int cm_mra_handler(struct cm_work *work)
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout))
+ cm_id_priv->msg, timeout)) {
+ if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_MRA_COUNTER]);
goto out;
+ }
cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
break;
+ case IB_CM_MRA_REQ_RCVD:
+ case IB_CM_MRA_REP_RCVD:
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_MRA_COUNTER]);
+ /* fall through */
default:
goto out;
}
@@ -2502,6 +2568,8 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_LAP_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
@@ -2515,6 +2583,10 @@ static int cm_lap_handler(struct cm_work *work)
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
+ case IB_CM_LAP_RCVD:
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_LAP_COUNTER]);
+ goto unlock;
default:
goto unlock;
}
@@ -2796,6 +2868,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
+ atomic_long_inc(&work->port->counters
+ [CM_RECV_DUPLICATES][CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
@@ -2990,6 +3064,25 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
+ struct cm_port *port;
+ u16 attr_index;
+
+ port = mad_agent->context;
+ attr_index = be16_to_cpu(((struct ib_mad_hdr *)
+ msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
+
+ /*
+ * If the send was in response to a received message (context[0] is not
+ * set to a cm_id), and is not a REJ, then it is a send that was
+ * manually retried.
+ */
+ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ msg->retries = 1;
+
+ atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
+ if (msg->retries)
+ atomic_long_add(msg->retries,
+ &port->counters[CM_XMIT_RETRIES][attr_index]);
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
@@ -3148,8 +3241,10 @@ EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
+ struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
+ u16 attr_id;
int paths = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
@@ -3194,6 +3289,9 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
return;
}
+ attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+ atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);
+
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
GFP_KERNEL);
if (!work) {
@@ -3204,7 +3302,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
- work->port = (struct cm_port *)mad_agent->context;
+ work->port = port;
queue_delayed_work(cm.wq, &work->work, 0);
}
@@ -3379,6 +3477,65 @@ static void cm_get_ack_delay(struct cm_device *cm_dev)
cm_dev->ack_delay = attr.local_ca_ack_delay;
}
+static ssize_t cm_read_counter(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ atomic_long_t *counter;
+ u64 value;
+
+ if (*pos >= CM_ATTR_COUNT)
+ return 0;
+
+ counter = filp->f_dentry->d_inode->i_private;
+ value = (u64) atomic_long_read(&counter[*pos]);
+
+ return snprintf(buf, count, "%s %lld\n", attr_names[(*pos)++], value);
+}
+
+static const struct file_operations cm_file_ops = {
+ .owner = THIS_MODULE,
+ .read = cm_read_counter
+};
+
+static int cm_create_port_fs(struct cm_port *port)
+{
+ char port_name[4];
+ int i;
+
+ sprintf(port_name, "%d", port->port_num);
+ port->port_dir = debugfs_create_dir(port_name, port->cm_dev->dev_dir);
+ if (!port->port_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < CM_COUNTERS; i++) {
+ port->counter_file[i] = debugfs_create_file(counter_names[i],
+ S_IFREG | S_IRUGO,
+ port->port_dir,
+ &port->counters[i],
+ &cm_file_ops);
+ if (!port->counter_file[i])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (i--)
+ debugfs_remove(port->counter_file[i]);
+
+ debugfs_remove(port->port_dir);
+ return -ENOMEM;
+}
+
+static void cm_remove_port_fs(struct cm_port *port)
+{
+ int i;
+
+ for (i = 0; i < CM_COUNTERS; i++)
+ debugfs_remove(port->counter_file[i]);
+
+ debugfs_remove(port->port_dir);
+}
+
static void cm_add_one(struct ib_device *device)
{
struct cm_device *cm_dev;
@@ -3397,11 +3554,15 @@ static void cm_add_one(struct ib_device *device)
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
- cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
+ cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
return;
+ cm_dev->dev_dir = debugfs_create_dir(device->name, cm_dir);
+ if (!cm_dev->dev_dir)
+ goto error1;
+
cm_dev->device = device;
cm_get_ack_delay(cm_dev);
@@ -3410,6 +3571,11 @@ static void cm_add_one(struct ib_device *device)
port = &cm_dev->port[i-1];
port->cm_dev = cm_dev;
port->port_num = i;
+
+ ret = cm_create_port_fs(port);
+ if (ret)
+ goto error2;
+
port->mad_agent = ib_register_mad_agent(device, i,
IB_QPT_GSI,
&reg_req,
@@ -3418,11 +3584,11 @@ static void cm_add_one(struct ib_device *device)
cm_recv_handler,
port);
if (IS_ERR(port->mad_agent))
- goto error1;
+ goto error3;
ret = ib_modify_port(device, i, 0, &port_modify);
if (ret)
- goto error2;
+ goto error4;
}
ib_set_client_data(device, &cm_client, cm_dev);
@@ -3431,16 +3597,21 @@ static void cm_add_one(struct ib_device *device)
write_unlock_irqrestore(&cm.device_lock, flags);
return;
-error2:
+error4:
ib_unregister_mad_agent(port->mad_agent);
-error1:
+error3:
+ cm_remove_port_fs(port);
+error2:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
port = &cm_dev->port[i-1];
ib_modify_port(device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
+ cm_remove_port_fs(port);
}
+ debugfs_remove(cm_dev->dev_dir);
+error1:
kfree(cm_dev);
}
@@ -3466,7 +3637,9 @@ static void cm_remove_one(struct ib_device *device)
port = &cm_dev->port[i-1];
ib_modify_port(device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
+ cm_remove_port_fs(port);
}
+ debugfs_remove(cm_dev->dev_dir);
kfree(cm_dev);
}
@@ -3488,17 +3661,25 @@ static int __init ib_cm_init(void)
idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list);
- cm.wq = create_workqueue("ib_cm");
- if (!cm.wq)
+ cm_dir = debugfs_create_dir("infiniband_cm", NULL);
+ if (!cm_dir)
return -ENOMEM;
+ cm.wq = create_workqueue("ib_cm");
+ if (!cm.wq) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
ret = ib_register_client(&cm_client);
if (ret)
- goto error;
+ goto error2;
return 0;
-error:
+error2:
destroy_workqueue(cm.wq);
+error1:
+ debugfs_remove(cm_dir);
return ret;
}
@@ -3519,6 +3700,7 @@ static void __exit ib_cm_cleanup(void)
}
ib_unregister_client(&cm_client);
+ debugfs_remove(cm_dir);
idr_destroy(&cm.local_id_table);
}
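
For anyone checking the attr_index arithmetic in cm_send_handler() and
cm_recv_handler(): the CM MAD attribute IDs for REQ through APR are
assigned consecutively starting at 0x0010, so subtracting
CM_ATTR_ID_OFFSET from the host-order attribute ID lands directly on
the matching entry in the "ordered by attribute ID" counter enum. A
standalone userspace sketch of that mapping:

#include <stdio.h>

#define CM_ATTR_ID_OFFSET 0x0010

static const char *attr_names[] = {
	"REQ", "MRA", "REJ", "REP", "RTU", "DREQ", "DREP",
	"SIDR_REQ", "SIDR_REP", "LAP", "APR"
};

int main(void)
{
	unsigned int attr_id;

	/* Same arithmetic as the handlers: index = attr_id - 0x0010. */
	for (attr_id = 0x0010; attr_id <= 0x001a; attr_id++)
		printf("attr_id 0x%04x -> counter index %u (%s)\n",
		       attr_id, attr_id - CM_ATTR_ID_OFFSET,
		       attr_names[attr_id - CM_ATTR_ID_OFFSET]);
	return 0;
}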