[openib-general] [PATCH] ib_mad: Better handling of send and receive handlers
Hal Rosenstock
halr at voltaire.com
Thu Oct 14 12:32:01 PDT 2004
ib_mad: Better handling of send and receive handlers
Index: ib_mad.c
===================================================================
--- ib_mad.c (revision 1004)
+++ ib_mad.c (working copy)
@@ -127,17 +127,9 @@
ret = ERR_PTR(-EINVAL);
goto error1;
}
- if (!bitmap_empty(mad_reg_req->method_mask,
- IB_MGMT_MAX_METHODS)) {
- if (!recv_handler) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- } else {
- if (!send_handler) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
+ if (!recv_handler) {
+ ret = ERR_PTR(-EINVAL);
+ goto error1;
}
if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
/*
@@ -351,6 +343,12 @@
return -EINVAL;
}
+ if (!mad_agent->send_handler ||
+ (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)) {
+ *bad_send_wr = cur_send_wr;
+ return -EINVAL;
+ }
+
mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
agent);
port_priv = mad_agent_priv->port_priv;
@@ -758,6 +756,14 @@
}
ret:
+ if (!mad_agent->agent.recv_handler) {
+ printk(KERN_ERR PFX "No receive handler for client "
+ "0x%x on port %d\n",
+ (unsigned int)&mad_agent->agent,
+ port_priv->port_num);
+ mad_agent = NULL;
+ }
+
return mad_agent;
}
@@ -854,8 +860,7 @@
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Defined behavior is to complete response before request */
- if (mad_agent_priv->agent.recv_handler)
- mad_agent_priv->agent.recv_handler(
+ mad_agent_priv->agent.recv_handler(
&mad_agent_priv->agent,
&recv->header.recv_wc);
atomic_dec(&mad_agent_priv->refcount);
@@ -865,8 +870,7 @@
mad_send_wc.wr_id = mad_send_wr->wr_id;
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
} else {
- if (mad_agent_priv->agent.recv_handler)
- mad_agent_priv->agent.recv_handler(
+ mad_agent_priv->agent.recv_handler(
&mad_agent_priv->agent,
&recv->header.recv_wc);
if (atomic_dec_and_test(&mad_agent_priv->refcount))
@@ -967,7 +971,7 @@
}
ret:
- if (!mad_agent || !mad_agent->agent.recv_handler) {
+ if (!mad_agent) {
/* Should this case be optimized ? */
kmem_cache_free(ib_mad_cache, recv);
}
@@ -1033,9 +1037,8 @@
if (mad_send_wr->status != IB_WC_SUCCESS )
mad_send_wc->status = mad_send_wr->status;
- if (mad_agent_priv->agent.send_handler)
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- mad_send_wc);
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ mad_send_wc);
/* Release reference on agent taken when sending */
if (atomic_dec_and_test(&mad_agent_priv->refcount))
@@ -1148,9 +1151,7 @@
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
&cancel_list, agent_list) {
mad_send_wc.wr_id = mad_send_wr->wr_id;
- if (mad_agent_priv->agent.send_handler)
- mad_agent_priv->agent.send_handler(
- &mad_agent_priv->agent,
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
list_del(&mad_send_wr->agent_list);
@@ -1211,9 +1212,8 @@
mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
mad_send_wc.vendor_err = 0;
mad_send_wc.wr_id = mad_send_wr->wr_id;
- if (mad_agent_priv->agent.send_handler)
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
kfree(mad_send_wr);
if (atomic_dec_and_test(&mad_agent_priv->refcount))
More information about the general mailing list