[openib-general] [PATCH] mad: Use thread context for callbacks due to locally handled MADs in handle_outgoing_smp
Hal Rosenstock
halr at voltaire.com
Wed Dec 15 14:23:25 PST 2004
mad: Use thread context for callbacks due to locally handled MADs in
handle_outgoing_smp
Signed-off-by: Hal Rosenstock <halr at voltaire.com>
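
Background, for review: handle_outgoing_smp() previously invoked the client's
receive and send handlers directly from the send path, which may execute in
atomic context (note the in_atomic()/irqs_disabled() test around the
allocation). With this patch the function only queues an ib_mad_local_private
record on a per-agent local_list and schedules local_completions() on the
port workqueue, so both callbacks run in thread context. The sketch below is
a minimal userspace analog of that pattern (pthreads instead of a kernel
workqueue, hypothetical names throughout), not the driver code itself:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct completion_rec {
	struct completion_rec *next;
	int wr_id;			/* client work request ID */
};

static struct completion_rec *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;

/* Send path: only queues a record; never calls back into the client. */
static void queue_completion(int wr_id)
{
	struct completion_rec *c = malloc(sizeof(*c));

	if (!c)
		return;			/* sketch: drop on allocation failure */
	c->wr_id = wr_id;
	c->next = NULL;
	pthread_mutex_lock(&lock);
	if (tail)			/* FIFO, like list_add_tail() */
		tail->next = c;
	else
		head = c;
	tail = c;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&nonempty);
}

/* Worker: pops records and runs the callbacks in thread context. */
static void *completions_worker(void *arg)
{
	for (;;) {
		struct completion_rec *c;

		pthread_mutex_lock(&lock);
		while (!head)
			pthread_cond_wait(&nonempty, &lock);
		c = head;
		head = c->next;
		if (!head)
			tail = NULL;
		pthread_mutex_unlock(&lock);

		if (c->wr_id < 0) {	/* sentinel: stop the worker */
			free(c);
			break;
		}
		/* Callback runs here, outside the lock. */
		printf("completing wr_id %d in thread context\n", c->wr_id);
		free(c);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, completions_worker, NULL);
	queue_completion(1);
	queue_completion(2);
	queue_completion(-1);		/* sentinel */
	pthread_join(worker, NULL);
	return 0;
}
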
Index: mad_priv.h
===================================================================
--- mad_priv.h (revision 1332)
+++ mad_priv.h (working copy)
@@ -114,6 +114,8 @@
struct list_head wait_list;
struct work_struct timed_work;
unsigned long timeout;
+ struct list_head local_list;
+ struct work_struct local_work;
atomic_t refcount;
wait_queue_head_t wait;
@@ -143,6 +145,15 @@
enum ib_wc_status status;
};
+struct ib_mad_local_private {
+ struct list_head completion_list;
+ struct ib_mad_private *mad_priv;
+ struct ib_send_wr send_wr;
+ struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
+ u64 wr_id; /* client WR ID */
+ u64 tid;
+};
+
struct ib_mad_mgmt_method_table {
struct ib_mad_agent_private *agent[IB_MGMT_MAX_METHODS];
};
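
The new ib_mad_local_private record exists because the completion is now
deferred: the caller's ib_send_wr and its sg_list are only guaranteed valid
while the send call is on the stack, so the mad.c hunk below copies both
into the record before queueing it. A small standalone illustration of that
deep-copy step, using hypothetical stand-in types rather than the real
ib_send_wr/ib_sge definitions:

#include <stdio.h>
#include <string.h>

struct sge { unsigned long addr; unsigned int length; };
struct send_wr {
	struct send_wr *next;
	unsigned long wr_id;
	struct sge *sg_list;
	int num_sge;
};

#define MAX_SG 4	/* stands in for IB_MAD_SEND_REQ_MAX_SG */

struct local_record {
	struct send_wr send_wr;		/* copy of the request */
	struct sge sg_list[MAX_SG];	/* private copy of the gather list */
};

/* Mirrors the copying handle_outgoing_smp() does before queueing. */
static void snapshot(struct local_record *rec, const struct send_wr *wr)
{
	rec->send_wr = *wr;			/* shallow copy of scalars */
	rec->send_wr.sg_list = rec->sg_list;	/* repoint into the record */
	memcpy(rec->sg_list, wr->sg_list,
	       sizeof(*wr->sg_list) * wr->num_sge);
	rec->send_wr.next = NULL;		/* detach from any chain */
}

int main(void)
{
	struct sge caller_sge = { 0x1000, 256 };
	struct send_wr wr = { NULL, 42, &caller_sge, 1 };
	struct local_record rec;

	snapshot(&rec, &wr);
	caller_sge.length = 0;	/* caller memory may change or vanish */
	printf("snapshot keeps length %u\n", rec.sg_list[0].length);
	return 0;
}
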
Index: mad.c
===================================================================
--- mad.c (revision 1332)
+++ mad.c (working copy)
@@ -87,6 +87,7 @@
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
+static void local_completions(void *data);
static int solicited_mad(struct ib_mad *mad);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
@@ -356,6 +357,9 @@
INIT_LIST_HEAD(&mad_agent_priv->send_list);
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_LIST_HEAD(&mad_agent_priv->local_list);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions,
+ mad_agent_priv);
atomic_set(&mad_agent_priv->refcount, 1);
init_waitqueue_head(&mad_agent_priv->wait);
@@ -640,9 +644,10 @@
struct ib_smp *smp,
struct ib_send_wr *send_wr)
{
- int ret;
+ int ret, alloc_flags;
+ unsigned long flags;
+ struct ib_mad_local_private *local;
struct ib_mad_private *mad_priv;
- struct ib_mad_send_wc mad_send_wc;
struct ib_device *device = mad_agent_priv->agent.device;
u8 port_num = mad_agent_priv->agent.port_num;
@@ -656,12 +661,22 @@
if (!ret || !device->process_mad)
goto out;
- mad_priv = kmem_cache_alloc(ib_mad_cache,
- (in_atomic() || irqs_disabled()) ?
- GFP_ATOMIC : GFP_KERNEL);
+ if (in_atomic() || irqs_disabled())
+ alloc_flags = GFP_ATOMIC;
+ else
+ alloc_flags = GFP_KERNEL;
+ local = kmalloc(sizeof *local, alloc_flags);
+ if (!local) {
+ ret = -ENOMEM;
+ printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+ goto out;
+ }
+ local->mad_priv = NULL;
+ mad_priv = kmem_cache_alloc(ib_mad_cache, alloc_flags);
if (!mad_priv) {
ret = -ENOMEM;
printk(KERN_ERR PFX "No memory for local response MAD\n");
+ kfree(local);
goto out;
}
ret = device->process_mad(device, 0, port_num, smp->dr_slid,
@@ -675,39 +690,9 @@
* there is a recv handler
*/
if (solicited_mad(&mad_priv->mad.mad) &&
- mad_agent_priv->agent.recv_handler) {
- struct ib_wc wc;
-
- /*
- * Defined behavior is to complete response
- * before request
- */
- wc.wr_id = send_wr->wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.byte_len = sizeof(struct ib_mad);
- wc.src_qp = IB_QP0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = IB_LID_PERMISSIVE;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- mad_priv->header.recv_wc.wc = &wc;
- mad_priv->header.recv_wc.mad_len =
- sizeof(struct ib_mad);
- INIT_LIST_HEAD(&mad_priv->header.recv_wc.recv_buf.list);
- mad_priv->header.recv_wc.recv_buf.grh = NULL;
- mad_priv->header.recv_wc.recv_buf.mad =
- &mad_priv->mad.mad;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_recv(mad_agent_priv->qp_info,
- &mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
- mad_agent_priv->agent.recv_handler(
- &mad_agent_priv->agent,
- &mad_priv->header.recv_wc);
- } else
+ mad_agent_priv->agent.recv_handler)
+ local->mad_priv = mad_priv;
+ else
kmem_cache_free(ib_mad_cache, mad_priv);
break;
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
@@ -715,23 +700,31 @@
break;
case IB_MAD_RESULT_SUCCESS:
kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(local);
ret = 0;
goto out;
default:
kmem_cache_free(ib_mad_cache, mad_priv);
+ kfree(local);
ret = -EINVAL;
goto out;
}
- /* Complete send */
- mad_send_wc.status = IB_WC_SUCCESS;
- mad_send_wc.vendor_err = 0;
- mad_send_wc.wr_id = send_wr->wr_id;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info, send_wr, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
- mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
- &mad_send_wc);
+ local->send_wr = *send_wr;
+ local->send_wr.sg_list = local->sg_list;
+ memcpy(local->sg_list, send_wr->sg_list,
+ sizeof *send_wr->sg_list * send_wr->num_sge);
+ local->send_wr.next = NULL;
+ local->tid = send_wr->wr.ud.mad_hdr->tid;
+ local->wr_id = send_wr->wr_id;
+ /* Reference MAD agent until local completion handled */
+ atomic_inc(&mad_agent_priv->refcount);
+ /* Queue local completion to local list */
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ queue_work(mad_agent_priv->qp_info->port_priv->wq,
+ &mad_agent_priv->local_work);
ret = 1;
out:
return ret;
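
With two allocations in play, every early exit taken after the kmalloc must
release whatever has been allocated so far, which is why the hunk above adds
kfree(local) to the mad_priv failure path and to the IB_MAD_RESULT_SUCCESS
and default exits. A compact userspace analog of that cleanup discipline
(illustrative names only):

#include <stdlib.h>

/* Once the second allocation exists, every exit that does not hand the
 * record to the completion worker must free both objects. */
static int local_handle(int consumed)
{
	void *local, *mad_priv;
	int ret = -1;

	local = malloc(64);		/* the ib_mad_local_private analog */
	if (!local)
		goto out;
	mad_priv = malloc(256);		/* the response MAD buffer analog */
	if (!mad_priv) {
		free(local);		/* first allocation must not leak */
		goto out;
	}
	if (consumed) {			/* e.g. IB_MAD_RESULT_SUCCESS alone */
		free(mad_priv);
		free(local);
		ret = 0;
		goto out;
	}
	/* Otherwise ownership of both passes to the deferred worker. */
	free(mad_priv);			/* sketch stand-in for that hand-off */
	free(local);
	ret = 1;
out:
	return ret;
}

int main(void)
{
	return local_handle(1) == 0 ? 0 : 1;
}
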
@@ -2020,6 +2013,73 @@
}
EXPORT_SYMBOL(ib_cancel_mad);
+static void local_completions(void *data)
+{
+ struct ib_mad_agent_private *mad_agent_priv;
+ struct ib_mad_local_private *local;
+ unsigned long flags;
+ struct ib_wc wc;
+ struct ib_mad_send_wc mad_send_wc;
+
+ mad_agent_priv = (struct ib_mad_agent_private *)data;
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ while (!list_empty(&mad_agent_priv->local_list)) {
+ local = list_entry(mad_agent_priv->local_list.next,
+ struct ib_mad_local_private,
+ completion_list);
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ if (local->mad_priv) {
+ /*
+ * Defined behavior is to complete response
+ * before request
+ */
+ wc.wr_id = local->wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.byte_len = sizeof(struct ib_mad);
+ wc.src_qp = IB_QP0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = IB_LID_PERMISSIVE;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ local->mad_priv->header.recv_wc.wc = &wc;
+ local->mad_priv->header.recv_wc.mad_len =
+ sizeof(struct ib_mad);
+ INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
+ local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
+ local->mad_priv->header.recv_wc.recv_buf.mad =
+ &local->mad_priv->mad.mad;
+ if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
+ snoop_recv(mad_agent_priv->qp_info,
+ &local->mad_priv->header.recv_wc,
+ IB_MAD_SNOOP_RECVS);
+ mad_agent_priv->agent.recv_handler(
+ &mad_agent_priv->agent,
+ &local->mad_priv->header.recv_wc);
+ }
+
+ /* Complete send */
+ mad_send_wc.status = IB_WC_SUCCESS;
+ mad_send_wc.vendor_err = 0;
+ mad_send_wc.wr_id = local->wr_id;
+ if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
+ snoop_send(mad_agent_priv->qp_info, &local->send_wr,
+ &mad_send_wc,
+ IB_MAD_SNOOP_SEND_COMPLETIONS);
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ list_del(&local->completion_list);
+ atomic_dec(&mad_agent_priv->refcount);
+ kfree(local);
+ }
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+}
+
static void timeout_sends(void *data)
{
struct ib_mad_agent_private *mad_agent_priv;
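
Two details of local_completions() worth noting: the spinlock is dropped
before the receive and send handlers run, so clients may safely re-enter the
MAD layer from their callbacks, and the reference taken by atomic_inc in
handle_outgoing_smp() is only dropped after the callbacks finish, keeping the
agent alive while completions are pending. A minimal sketch of that
reference-count pattern using C11 atomics (free-on-zero is a sketch-only
simplification; the real deregistration path decrements and waits on the
agent's waitqueue instead):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct agent {
	atomic_int refcount;
};

/* Taken before queueing deferred work (the patch's atomic_inc). */
static void agent_get(struct agent *a)
{
	atomic_fetch_add(&a->refcount, 1);
}

/* Dropped after the deferred callback runs (the patch's atomic_dec). */
static void agent_put(struct agent *a)
{
	if (atomic_fetch_sub(&a->refcount, 1) == 1) {
		printf("last reference dropped\n");
		free(a);
	}
}

int main(void)
{
	struct agent *a = malloc(sizeof(*a));

	atomic_init(&a->refcount, 1);	/* base reference from registration */
	agent_get(a);			/* queueing a local completion */
	agent_put(a);			/* completion handled */
	agent_put(a);			/* deregistration drops the base ref */
	return 0;
}
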