[openib-general] [PATCH] MAD snooping API/implementation
Sean Hefty
mshefty at ichips.intel.com
Thu Dec 9 13:50:49 PST 2004
Here's a patch that adds the ability to snoop MADs. Currently only send and
receive completions are snooped, but the implementation should be general enough
to extend snooping to other areas fairly easily.
- Sean
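For illustration, here's a rough (hypothetical, untested) sketch of how a
client might use the new interface; the handler names, variables, and error
handling are made up for the example:

/* Hypothetical example client -- not part of the patch. */
static void snoop_send_handler(struct ib_mad_agent *mad_agent,
                               struct ib_send_wr *send_wr,
                               struct ib_mad_send_wc *mad_send_wc)
{
        /* Read-only: snooping clients must not modify the snooped data. */
        printk(KERN_INFO "snooped send completion, wr_id 0x%llx status %d\n",
               (unsigned long long) mad_send_wc->wr_id, mad_send_wc->status);
}

static void snoop_recv_handler(struct ib_mad_agent *mad_agent,
                               struct ib_mad_recv_wc *mad_recv_wc)
{
        /* Read-only: the receive buffer is owned by the receiving client. */
        printk(KERN_INFO "snooped receive\n");
}

        struct ib_mad_agent *snoop_agent;

        snoop_agent = ib_register_mad_snoop(device, port_num, IB_QPT_SMI,
                                            IB_MAD_SNOOP_SEND_COMPLETIONS |
                                            IB_MAD_SNOOP_RECVS,
                                            snoop_send_handler,
                                            snoop_recv_handler, NULL);
        if (IS_ERR(snoop_agent))
                return PTR_ERR(snoop_agent);
        ...
        /* A snoop agent is torn down with the existing unregister call. */
        ib_unregister_mad_agent(snoop_agent);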
Index: core/mad.c
===================================================================
--- core/mad.c (revision 1316)
+++ core/mad.c (working copy)
@@ -367,17 +367,129 @@
}
EXPORT_SYMBOL(ib_register_mad_agent);
-/*
- * ib_unregister_mad_agent - Unregisters a client from using MAD services
- */
-int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
+static inline int is_snooping_sends(int mad_snoop_flags)
{
- struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_port_private *port_priv;
+ return (mad_snoop_flags &
+ (/*IB_MAD_SNOOP_POSTED_SENDS |
+ IB_MAD_SNOOP_RMPP_SENDS |*/
+ IB_MAD_SNOOP_SEND_COMPLETIONS /*|
+ IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
+}
+
+static inline int is_snooping_recvs(int mad_snoop_flags)
+{
+ return (mad_snoop_flags &
+ (IB_MAD_SNOOP_RECVS /*|
+ IB_MAD_SNOOP_RMPP_RECVS*/));
+}
+
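+/*
+ * Adds a snoop agent to the QP's snoop table, growing the table if no
+ * free slot is available. Returns the assigned table index, or -ENOMEM
+ * if the table cannot be grown.
+ */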
+static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
+ struct ib_mad_snoop_private *mad_snoop_priv)
+{
+ struct ib_mad_snoop_private **new_snoop_table;
unsigned long flags;
+ int i;
- mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
- agent);
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ /* Check for empty slot in array. */
+ for (i = 0; i < qp_info->snoop_table_size; i++)
+ if (!qp_info->snoop_table[i])
+ break;
+
+ if (i == qp_info->snoop_table_size) {
+ /* Grow table. */
+ new_snoop_table = kmalloc(sizeof mad_snoop_priv *
+ (qp_info->snoop_table_size + 1),
+ GFP_ATOMIC);
+ if (!new_snoop_table) {
+ i = -ENOMEM;
+ goto out;
+ }
+ if (qp_info->snoop_table) {
+ memcpy(new_snoop_table, qp_info->snoop_table,
+ sizeof mad_snoop_priv *
+ qp_info->snoop_table_size);
+ kfree(qp_info->snoop_table);
+ }
+ qp_info->snoop_table = new_snoop_table;
+ qp_info->snoop_table_size++;
+ }
+ qp_info->snoop_table[i] = mad_snoop_priv;
+ atomic_inc(&qp_info->snoop_count);
+out:
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+ return i;
+}
+
+struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
+ u8 port_num,
+ enum ib_qp_type qp_type,
+ int mad_snoop_flags,
+ ib_mad_snoop_handler snoop_handler,
+ ib_mad_recv_handler recv_handler,
+ void *context)
+{
+ struct ib_mad_port_private *port_priv;
+ struct ib_mad_agent *ret;
+ struct ib_mad_snoop_private *mad_snoop_priv;
+ int qpn;
+
+ /* Validate parameters */
+ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
+ (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
+ ret = ERR_PTR(-EINVAL);
+ goto error1;
+ }
+ qpn = get_spl_qp_index(qp_type);
+ if (qpn == -1) {
+ ret = ERR_PTR(-EINVAL);
+ goto error1;
+ }
+ port_priv = ib_get_mad_port(device, port_num);
+ if (!port_priv) {
+ ret = ERR_PTR(-ENODEV);
+ goto error1;
+ }
+ /* Allocate structures */
+ mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+ if (!mad_snoop_priv) {
+ ret = ERR_PTR(-ENOMEM);
+ goto error1;
+ }
+
+ /* Now, fill in the various structures */
+ memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
+ mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
+ mad_snoop_priv->agent.device = device;
+ mad_snoop_priv->agent.recv_handler = recv_handler;
+ mad_snoop_priv->agent.snoop_handler = snoop_handler;
+ mad_snoop_priv->agent.context = context;
+ mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
+ mad_snoop_priv->agent.port_num = port_num;
+ mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
+ init_waitqueue_head(&mad_snoop_priv->wait);
+ mad_snoop_priv->snoop_index = register_snoop_agent(
+ &port_priv->qp_info[qpn],
+ mad_snoop_priv);
+ if (mad_snoop_priv->snoop_index < 0) {
+ ret = ERR_PTR(mad_snoop_priv->snoop_index);
+ goto error2;
+ }
+
+ atomic_set(&mad_snoop_priv->refcount, 1);
+ return &mad_snoop_priv->agent;
+
+error2:
+ kfree(mad_snoop_priv);
+error1:
+ return ret;
+}
+EXPORT_SYMBOL(ib_register_mad_snoop);
+
+static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+{
+ struct ib_mad_port_private *port_priv;
+ unsigned long flags;
/* Note that we could still be handling received MADs */
@@ -405,6 +517,46 @@
if (mad_agent_priv->reg_req)
kfree(mad_agent_priv->reg_req);
kfree(mad_agent_priv);
+}
+
+static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
+{
+ struct ib_mad_qp_info *qp_info;
+ unsigned long flags;
+
+ qp_info = mad_snoop_priv->qp_info;
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
+ atomic_dec(&qp_info->snoop_count);
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+
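+ /* Drop our reference and wait for any snoop handlers still running. */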
+ atomic_dec(&mad_snoop_priv->refcount);
+ wait_event(mad_snoop_priv->wait,
+ !atomic_read(&mad_snoop_priv->refcount));
+
+ kfree(mad_snoop_priv);
+}
+
+/*
+ * ib_unregister_mad_agent - Unregisters a client from using MAD services
+ */
+int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
+{
+ struct ib_mad_agent_private *mad_agent_priv;
+ struct ib_mad_snoop_private *mad_snoop_priv;
+
+ /* If the TID is zero, the agent can only snoop. */
+ if (mad_agent->hi_tid) {
+ mad_agent_priv = container_of(mad_agent,
+ struct ib_mad_agent_private,
+ agent);
+ unregister_mad_agent(mad_agent_priv);
+ } else {
+ mad_snoop_priv = container_of(mad_agent,
+ struct ib_mad_snoop_private,
+ agent);
+ unregister_mad_snoop(mad_snoop_priv);
+ }
return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
@@ -422,30 +574,82 @@
spin_unlock_irqrestore(&mad_queue->lock, flags);
}
+static void snoop_send(struct ib_mad_qp_info *qp_info,
+ struct ib_send_wr *send_wr,
+ struct ib_mad_send_wc *mad_send_wc,
+ int mad_snoop_flags)
+{
+ struct ib_mad_snoop_private *mad_snoop_priv;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ for (i = 0; i < qp_info->snoop_table_size; i++) {
+ mad_snoop_priv = qp_info->snoop_table[i];
+ if (!mad_snoop_priv ||
+ !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
+ continue;
+
+ atomic_inc(&mad_snoop_priv->refcount);
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+ mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
+ send_wr, mad_send_wc);
+ if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+ wake_up(&mad_snoop_priv->wait);
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ }
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+}
+
+static void snoop_recv(struct ib_mad_qp_info *qp_info,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ int mad_snoop_flags)
+{
+ struct ib_mad_snoop_private *mad_snoop_priv;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ for (i = 0; i < qp_info->snoop_table_size; i++) {
+ mad_snoop_priv = qp_info->snoop_table[i];
+ if (!mad_snoop_priv ||
+ !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
+ continue;
+
+ atomic_inc(&mad_snoop_priv->refcount);
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+ mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
+ mad_recv_wc);
+ if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+ wake_up(&mad_snoop_priv->wait);
+ spin_lock_irqsave(&qp_info->snoop_lock, flags);
+ }
+ spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
+}
+
/*
* Return 0 if SMP is to be sent
* Return 1 if SMP was consumed locally (whether or not solicited)
* Return < 0 if error
*/
-static int handle_outgoing_smp(struct ib_mad_agent *mad_agent,
+static int handle_outgoing_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_smp *smp,
struct ib_send_wr *send_wr)
{
int ret;
struct ib_mad_private *mad_priv;
struct ib_mad_send_wc mad_send_wc;
+ struct ib_device *device = mad_agent_priv->agent.device;
+ u8 port_num = mad_agent_priv->agent.port_num;
- if (!smi_handle_dr_smp_send(smp,
- mad_agent->device->node_type,
- mad_agent->port_num)) {
+ if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
ret = -EINVAL;
printk(KERN_ERR PFX "Invalid directed route\n");
goto out;
}
/* Check to post send on QP or process locally */
- ret = smi_check_local_dr_smp(smp, mad_agent->device,
- mad_agent->port_num);
- if (!ret || !mad_agent->device->process_mad)
+ ret = smi_check_local_dr_smp(smp, device, port_num);
+ if (!ret || !device->process_mad)
goto out;
mad_priv = kmem_cache_alloc(ib_mad_cache,
@@ -456,10 +660,9 @@
printk(KERN_ERR PFX "No memory for local response MAD\n");
goto out;
}
- ret = mad_agent->device->process_mad(mad_agent->device, 0,
- mad_agent->port_num, smp->dr_slid,
- (struct ib_mad *)smp,
- (struct ib_mad *)&mad_priv->mad);
+ ret = device->process_mad(device, 0, port_num, smp->dr_slid,
+ (struct ib_mad *)smp,
+ (struct ib_mad *)&mad_priv->mad);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -468,7 +671,7 @@
* there is a recv handler
*/
if (solicited_mad(&mad_priv->mad.mad) &&
- mad_agent->recv_handler) {
+ mad_agent_priv->agent.recv_handler) {
struct ib_wc wc;
/*
@@ -494,7 +697,12 @@
mad_priv->header.recv_buf.mad = &mad_priv->mad.mad;
mad_priv->header.recv_wc.recv_buf =
&mad_priv->header.recv_buf;
- mad_agent->recv_handler(mad_agent,
+ if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
+ snoop_recv(mad_agent_priv->qp_info,
+ &mad_priv->header.recv_wc,
+ IB_MAD_SNOOP_RECVS);
+ mad_agent_priv->agent.recv_handler(
+ &mad_agent_priv->agent,
&mad_priv->header.recv_wc);
} else
kmem_cache_free(ib_mad_cache, mad_priv);
@@ -516,7 +724,11 @@
mad_send_wc.status = IB_WC_SUCCESS;
mad_send_wc.vendor_err = 0;
mad_send_wc.wr_id = send_wr->wr_id;
- mad_agent->send_handler(mad_agent, &mad_send_wc);
+ if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
+ snoop_send(mad_agent_priv->qp_info, send_wr, &mad_send_wc,
+ IB_MAD_SNOOP_SEND_COMPLETIONS);
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
ret = 1;
out:
return ret;
@@ -610,7 +822,7 @@
smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- ret = handle_outgoing_smp(mad_agent, smp, send_wr);
+ ret = handle_outgoing_smp(mad_agent_priv, smp, send_wr);
if (ret < 0) /* error */
goto error2;
else if (ret == 1) /* locally consumed */
@@ -1383,6 +1595,9 @@
recv->header.recv_buf.mad = (struct ib_mad *)&recv->mad;
recv->header.recv_buf.grh = &recv->grh;
+ if (atomic_read(&qp_info->snoop_count))
+ snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
+
/* Validate MAD */
if (!validate_mad(recv->header.recv_buf.mad, qp_info->qp->qp_num))
goto out;
@@ -1600,7 +1815,11 @@
/* Restore client wr_id in WC and complete send */
wc->wr_id = mad_send_wr->wr_id;
- ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc*)wc);
+ if (atomic_read(&qp_info->snoop_count))
+ snoop_send(qp_info, &mad_send_wr->send_wr,
+ (struct ib_mad_send_wc *)wc,
+ IB_MAD_SNOOP_SEND_COMPLETIONS);
+ ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
if (queued_send_wr) {
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
@@ -2068,6 +2287,10 @@
init_mad_queue(qp_info, &qp_info->send_queue);
init_mad_queue(qp_info, &qp_info->recv_queue);
INIT_LIST_HEAD(&qp_info->overflow_list);
+ spin_lock_init(&qp_info->snoop_lock);
+ qp_info->snoop_table = NULL;
+ qp_info->snoop_table_size = 0;
+ atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -2108,6 +2331,8 @@
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
ib_destroy_qp(qp_info->qp);
+ if (qp_info->snoop_table)
+ kfree(qp_info->snoop_table);
}
/*
Index: core/mad_priv.h
===================================================================
--- core/mad_priv.h (revision 1316)
+++ core/mad_priv.h (working copy)
@@ -121,6 +121,15 @@
u8 rmpp_version;
};
+struct ib_mad_snoop_private {
+ struct ib_mad_agent agent;
+ struct ib_mad_qp_info *qp_info;
+ int snoop_index;
+ int mad_snoop_flags;
+ atomic_t refcount;
+ wait_queue_head_t wait;
+};
+
struct ib_mad_send_wr_private {
struct ib_mad_list_head mad_list;
struct list_head agent_list;
@@ -171,6 +180,10 @@
struct ib_mad_queue send_queue;
struct ib_mad_queue recv_queue;
struct list_head overflow_list;
+ spinlock_t snoop_lock;
+ struct ib_mad_snoop_private **snoop_table;
+ int snoop_table_size;
+ atomic_t snoop_count;
};
struct ib_mad_port_private {
Index: include/ib_mad.h
===================================================================
--- include/ib_mad.h (revision 1316)
+++ include/ib_mad.h (working copy)
@@ -126,13 +126,29 @@
struct ib_mad_send_wc *mad_send_wc);
/**
+ * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
+ * @mad_agent: MAD agent that snooped the MAD.
+ * @send_wr: Work request information on the sent MAD.
+ * @mad_send_wc: Work completion information on the sent MAD. Valid
+ * only for snooping that occurs on a send completion.
+ *
+ * Clients snooping MADs should not modify data referenced by the @send_wr
+ * or @mad_send_wc.
+ */
+typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
+ struct ib_send_wr *send_wr,
+ struct ib_mad_send_wc *mad_send_wc);
+
+/**
* ib_mad_recv_handler - callback handler for a received MAD.
* @mad_agent: MAD agent requesting the received MAD.
* @mad_recv_wc: Received work completion information on the received MAD.
*
* MADs received in response to a send request operation will be handed to
* the user after the send operation completes. All data buffers given
- * to the user through this routine are owned by the receiving client.
+ * to registered agents through this routine are owned by the receiving
+ * client, except for snooping agents. Clients snooping MADs should not
+ * modify the data referenced by @mad_recv_wc.
*/
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc);
@@ -143,6 +159,7 @@
* @qp: Reference to QP used for sending and receiving MADs.
* @recv_handler: Callback handler for a received MAD.
* @send_handler: Callback handler for a sent MAD.
+ * @snoop_handler: Callback handler for snooped sent MADs.
* @context: User-specified context associated with this registration.
* @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits
@@ -154,6 +171,7 @@
struct ib_qp *qp;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
+ ib_mad_snoop_handler snoop_handler;
void *context;
u32 hi_tid;
u8 port_num;
@@ -247,6 +265,35 @@
ib_mad_recv_handler recv_handler,
void *context);
+enum ib_mad_snoop_flags {
+ /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
+ /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
+ IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
+ /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
+ IB_MAD_SNOOP_RECVS = (1<<4)
+ /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
+ /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
+};
+
+/**
+ * ib_register_mad_snoop - Register to snoop sent and received MADs.
+ * @device: The device to register with.
+ * @port_num: The port on the specified device to use.
+ * @qp_type: Specifies which QP traffic to snoop. Must be either
+ * IB_QPT_SMI or IB_QPT_GSI.
+ * @mad_snoop_flags: Specifies where snooping occurs.
+ * @snoop_handler: The callback routine invoked for a snooped send.
+ * @recv_handler: The callback routine invoked for a snooped receive.
+ * @context: User specified context associated with the registration.
+ */
+struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
+ u8 port_num,
+ enum ib_qp_type qp_type,
+ int mad_snoop_flags,
+ ib_mad_snoop_handler snoop_handler,
+ ib_mad_recv_handler recv_handler,
+ void *context);
+
/**
* ib_unregister_mad_agent - Unregisters a client from using MAD services.
* @mad_agent: Corresponding MAD registration request to deregister.