[openib-general] [PATCH] cancel outstanding MADs when deregistering
Sean Hefty
mshefty at ichips.intel.com
Tue Sep 28 12:32:21 PDT 2004
This patch should allow outstanding sent MADs to be canceled when deregistration occurs. Keeping this simple turned out to be a little trickier than I first thought, so comments are welcome.
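Roughly, the scheme is: each send on the agent's send list holds one reference for its send completion, plus a second reference while a response/timeout is still expected. Deregistration walks the list, marks each request flushed, drops the timeout reference, and reports anything that reaches zero; requests still owned by an in-flight send completion are reported when that completion arrives. Below is a minimal user-space sketch of that idea (stand-in types and names, not the actual ib_mad structures); a matching sketch of the completion side follows the patch.

/*
 * Minimal user-space sketch of the cancel-on-deregister idea (these
 * are simplified stand-in types, not the ib_mad definitions): a send
 * holds one reference for its send completion and a second while a
 * response/timeout is outstanding; canceling drops the timeout
 * reference and reports whatever reaches zero with a flush status.
 */
#include <stdio.h>
#include <stdlib.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR };

struct send_wr {
	struct send_wr *next;
	unsigned long	wr_id;
	int		timeout_ms;
	int		refcount;
	enum wc_status	status;
};

/* Queue a send: one reference for the send completion, a second if a
 * response is expected, mirroring the refcount scheme in the patch. */
static struct send_wr *queue_send(struct send_wr **list, unsigned long wr_id,
				  int timeout_ms)
{
	struct send_wr *wr = calloc(1, sizeof(*wr));

	if (!wr)
		return NULL;
	wr->wr_id = wr_id;
	wr->timeout_ms = timeout_ms;
	wr->refcount = timeout_ms ? 2 : 1;
	wr->status = WC_SUCCESS;
	wr->next = *list;
	*list = wr;
	return wr;
}

/* Deregistration path: flush every queued send, drop the timeout
 * reference, and report anything whose count reaches zero.  Requests
 * still referenced by an in-flight send completion stay on the list. */
static void cancel_sends(struct send_wr **list)
{
	struct send_wr **pp = list, *wr;

	while ((wr = *pp) != NULL) {
		if (wr->status == WC_SUCCESS)
			wr->status = WC_WR_FLUSH_ERR;
		if (wr->timeout_ms) {
			wr->timeout_ms = 0;
			wr->refcount--;
		}
		if (wr->refcount <= 0) {
			*pp = wr->next;
			printf("wr_id %lu canceled (flush)\n", wr->wr_id);
			free(wr);
		} else {
			pp = &wr->next;	/* finished by its send completion */
		}
	}
}

int main(void)
{
	struct send_wr *list = NULL;

	queue_send(&list, 1, 0);	/* no response expected */
	queue_send(&list, 2, 1000);	/* still waiting on a response */
	cancel_sends(&list);		/* models deregistration */

	/* wr_id 1 is still queued here; its send completion would
	 * report and free it in the real code. */
	while (list) {
		struct send_wr *next = list->next;
		free(list);
		list = next;
	}
	return 0;
}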
- Sean
--
Index: access/ib_mad_priv.h
===================================================================
--- access/ib_mad_priv.h (revision 899)
+++ access/ib_mad_priv.h (working copy)
@@ -120,7 +120,8 @@
struct ib_mad_agent *agent;
u64 wr_id; /* client WRID */
int timeout_ms;
- int is_active;
+ int refcount;
+ enum ib_wc_status status;
};
struct ib_mad_mgmt_method_table {
Index: access/ib_mad.c
===================================================================
--- access/ib_mad.c (revision 899)
+++ access/ib_mad.c (working copy)
@@ -86,7 +86,7 @@
struct ib_qp *qp);
static int ib_mad_post_receive_mads(struct ib_mad_port_private *priv);
static inline u8 convert_mgmt_class(u8 mgmt_class);
-
+static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
/*
* ib_register_mad_agent - Register to send/receive MADs
@@ -252,11 +252,11 @@
mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
agent);
- /* Cleanup outstanding sends/pending receives for this agent !!! */
+ /* Cleanup pending receives for this agent !!! */
+ cancel_mads(mad_agent_priv);
spin_lock_irqsave(&mad_agent_priv->port_priv->reg_lock, flags);
remove_mad_reg_req(mad_agent_priv);
- /* Remove mad agent from port's agent list */
list_del(&mad_agent_priv->agent_list);
spin_unlock_irqrestore(&mad_agent_priv->port_priv->reg_lock, flags);
@@ -343,18 +343,21 @@
return -ENOMEM;
}
- /* Track sent MAD with agent. */
+ mad_send_wr->agent = mad_agent;
+ mad_send_wr->timeout_ms = cur_send_wr->wr.ud.timeout_ms;
+ if (mad_send_wr->timeout_ms)
+ mad_send_wr->refcount = 2;
+ else
+ mad_send_wr->refcount = 1;
+ mad_send_wr->status = IB_WC_SUCCESS;
+
+ /* Reference MAD agent until send completes. */
+ atomic_inc(&mad_agent_priv->refcount);
spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags);
list_add_tail(&mad_send_wr->agent_send_list,
&mad_agent_priv->send_list);
spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags);
- /* Reference MAD agent until send completes. */
- atomic_inc(&mad_agent_priv->refcount);
- mad_send_wr->agent = mad_agent;
- mad_send_wr->timeout_ms = cur_send_wr->wr.ud.timeout_ms;
- mad_send_wr->is_active = 1;
-
wr = *cur_send_wr;
wr.next = NULL;
@@ -368,9 +371,11 @@
list_del(&mad_send_wr->agent_send_list);
spin_unlock_irqrestore(&mad_agent_priv->send_list_lock,
flags);
+
*bad_send_wr = cur_send_wr;
if (atomic_dec_and_test(&mad_agent_priv->refcount))
wake_up(&mad_agent_priv->wait);
+
printk(KERN_NOTICE "ib_send_mad failed, ret = %d\n", ret);
return ret;
}
@@ -826,20 +831,31 @@
mad_agent_priv = container_of(mad_send_wr->agent,
struct ib_mad_agent_private, agent);
- /* Check whether timeout was requested !!! */
- mad_send_wr->is_active = 0;
+ spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags);
+ if (mad_send_wc->status != IB_WC_SUCCESS &&
+ mad_send_wr->status == IB_WC_SUCCESS) {
+
+ mad_send_wr->status = mad_send_wc->status;
+ if (mad_send_wr->timeout_ms) {
+ mad_send_wr->timeout_ms = 0;
+ mad_send_wr->refcount--;
+ }
+ }
- /* Handle RMPP... */
+ if (--mad_send_wr->refcount > 0) {
+ spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags);
+ return;
+ }
/* Remove send from MAD agent and notify client of completion. */
- spin_lock_irqsave(&mad_agent_priv->send_list_lock,
- flags);
list_del(&mad_send_wr->agent_send_list);
- spin_unlock_irqrestore(&mad_agent_priv->send_list_lock,
- flags);
+ spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags);
+
+ if (mad_send_wr->status != IB_WC_SUCCESS)
+ mad_send_wc->status = mad_send_wr->status;
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, mad_send_wc);
- /* Release reference taken when sending. */
+ /* Release reference on agent taken when sending. */
if (atomic_dec_and_test(&mad_agent_priv->refcount))
wake_up(&mad_agent_priv->wait);
@@ -935,6 +951,55 @@
}
}
+static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
+{
+ unsigned long flags;
+ struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
+ struct ib_mad_send_wc mad_send_wc;
+ struct list_head cancel_list;
+
+ INIT_LIST_HEAD(&cancel_list);
+
+ spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags);
+ list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
+ &mad_agent_priv->send_list, agent_send_list) {
+
+ if (mad_send_wr->status == IB_WC_SUCCESS)
+ mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
+
+ if (mad_send_wr->timeout_ms) {
+ mad_send_wr->timeout_ms = 0;
+ mad_send_wr->refcount--;
+ }
+
+ if (mad_send_wr->refcount <= 0) {
+ list_del(&mad_send_wr->agent_send_list);
+ list_add_tail(&mad_send_wr->agent_send_list,
+ &cancel_list);
+ }
+ }
+ spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags);
+
+ /* Report all canceled requests. */
+ mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
+ mad_send_wc.vendor_err = 0;
+
+ list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
+ &cancel_list, agent_send_list) {
+
+ mad_send_wc.wr_id = mad_send_wr->wr_id;
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
+
+ list_del(&mad_send_wr->agent_send_list);
+ kfree(mad_send_wr);
+
+ /* Release reference on agent taken when sending. */
+ if (atomic_dec_and_test(&mad_agent_priv->refcount))
+ wake_up(&mad_agent_priv->wait);
+ }
+}
+
/*
* IB MAD thread
*/
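For completeness, here is the matching completion-side sketch promised above (again stand-in types, not the ib_mad code): the send completion records the first error seen, gives up waiting for a response on error, and reports the request to the client only once its reference count reaches zero.

/*
 * Companion sketch to the one before the patch (same stand-in types,
 * not the ib_mad code): the send completion records the first error,
 * stops waiting for a response on error, and reports the request
 * only when its reference count reaches zero.
 */
#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR };

struct send_wr {
	unsigned long	wr_id;
	int		timeout_ms;
	int		refcount;
	enum wc_status	status;
};

/* Called when the send WQE completes with status 'wc'.  Returns 1 if
 * the request is finished and has been reported. */
static int complete_send(struct send_wr *wr, enum wc_status wc)
{
	if (wc != WC_SUCCESS && wr->status == WC_SUCCESS) {
		wr->status = wc;
		if (wr->timeout_ms) {
			/* No point waiting for a response to a send
			 * that failed; drop the timeout reference. */
			wr->timeout_ms = 0;
			wr->refcount--;
		}
	}

	if (--wr->refcount > 0)
		return 0;	/* response or timeout still outstanding */

	printf("wr_id %lu completed, status %d\n", wr->wr_id, wr->status);
	return 1;
}

int main(void)
{
	struct send_wr wr = { .wr_id = 3, .timeout_ms = 1000,
			      .refcount = 2, .status = WC_SUCCESS };

	/* An errored send drops both references at once, so the client
	 * hears about it immediately rather than after a timeout. */
	complete_send(&wr, WC_WR_FLUSH_ERR);
	return 0;
}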