[openib-general] [PATCH] CM: add callback for user establishment
Sean Hefty
mshefty at ichips.intel.com
Wed Feb 2 13:42:41 PST 2005
This patch invokes a callback after a user calls ib_cm_establish.
Users will receive either an IB_CM_RTU_RECEIVED or
IB_CM_USER_ESTABLISHED event indicating that a connection has been
established on the passive side.
The only item of note is that the connection state goes to ESTABLISHED
in the call to ib_cm_establish. This is necessary to avoid timing out
the connection from a missing RTU, but implies that a
DREQ message could be received and processed before the ESTABLISHED
callback is invoked. (In which case there will not be an ESTABLISHED
callback.)
- Sean
Index: include/ib_cm.h
===================================================================
--- include/ib_cm.h (revision 1720)
+++ include/ib_cm.h (working copy)
@@ -72,6 +72,7 @@
IB_CM_REP_ERROR,
IB_CM_REP_RECEIVED,
IB_CM_RTU_RECEIVED,
+ IB_CM_USER_ESTABLISHED,
IB_CM_DREQ_ERROR,
IB_CM_DREQ_RECEIVED,
IB_CM_DREP_RECEIVED,
Index: core/cm.c
===================================================================
--- core/cm.c (revision 1720)
+++ core/cm.c (working copy)
@@ -36,6 +36,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -133,7 +134,8 @@
struct work_struct work;
struct list_head list;
struct cm_port *port;
- struct ib_mad_recv_wc *mad_recv_wc;
+ struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
+ u32 local_id; /* Established */
struct ib_cm_event cm_event;
struct ib_sa_path_rec path[];
};
@@ -922,7 +924,6 @@
struct ib_cm_req_event_param *param;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- work->cm_event.event = IB_CM_REQ_RECEIVED;
param = &work->cm_event.param.req_rcvd;
param->listen_id = listen_id;
param->device = cm_id_priv->av.port->mad_agent->device;
@@ -1262,7 +1263,6 @@
struct ib_cm_rep_event_param *param;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
- work->cm_event.event = IB_CM_REP_RECEIVED;
param = &work->cm_event.param.rep_rcvd;
param->remote_ca_guid = rep_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
@@ -1341,29 +1341,23 @@
return -EINVAL;
}
-static int cm_rtu_handler(struct cm_work *work)
+static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- struct cm_rtu_msg *rtu_msg;
unsigned long flags;
u64 wr_id;
int ret;
- rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id_priv = cm_acquire_id_by_local_id(rtu_msg->remote_comm_id);
+ /* See comment in ib_cm_establish about lookup. */
+ cm_id_priv = cm_acquire_id_by_local_id(work->local_id);
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.event = IB_CM_RTU_RECEIVED;
- work->cm_event.private_data = &rtu_msg->private_data;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->id.state != IB_CM_REP_SENT &&
- cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
goto out;
}
- cm_id_priv->id.state = IB_CM_ESTABLISHED;
wr_id = (unsigned long) cm_id_priv->msg;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -1382,32 +1376,45 @@
return -EINVAL;
}
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
+ struct cm_rtu_msg *rtu_msg;
unsigned long flags;
- int ret = 0;
+ u64 wr_id;
+ int ret;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
+ cm_id_priv = cm_acquire_id_by_local_id(rtu_msg->remote_comm_id);
+ if (!cm_id_priv)
+ return -EINVAL;
+
+ work->cm_event.private_data = &rtu_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state)
- {
- case IB_CM_REP_SENT:
- case IB_CM_MRA_REP_RCVD:
- cm_id->state = IB_CM_ESTABLISHED;
- break;
- case IB_CM_ESTABLISHED:
- ret = -EISCONN;
- break;
- default:
- ret = -EINVAL;
- break;
+ if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+ cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ goto out;
}
+ cm_id_priv->id.state = IB_CM_ESTABLISHED;
+
+ wr_id = (unsigned long) cm_id_priv->msg;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return ret;
+
+ ib_cancel_mad(cm_id_priv->av.port->mad_agent, wr_id);
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
+out:
+ cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
-EXPORT_SYMBOL(ib_cm_establish);
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
struct cm_id_private *cm_id_priv,
@@ -1557,7 +1564,6 @@
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.event = IB_CM_DREQ_RECEIVED;
work->cm_event.private_data = &dreq_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -1608,7 +1614,6 @@
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.event = IB_CM_DREP_RECEIVED;
work->cm_event.private_data = &drep_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -1960,7 +1965,6 @@
if (!cm_id_priv)
return -EINVAL;
- work->cm_event.event = IB_CM_LAP_RECEIVED;
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
cm_format_path_from_lap(param->alternate_path, lap_msg);
@@ -2071,7 +2075,6 @@
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
- work->cm_event.event = IB_CM_APR_RECEIVED;
work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
@@ -2185,7 +2188,6 @@
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
- work->cm_event.event = IB_CM_SIDR_REQ_RECEIVED;
param = &work->cm_event.param.sidr_req_rcvd;
param->pkey = sidr_req_msg->pkey;
param->listen_id = listen_id;
@@ -2324,7 +2326,6 @@
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
- work->cm_event.event = IB_CM_SIDR_REP_RECEIVED;
param = &work->cm_event.param.sidr_rep_rcvd;
param->status = sidr_rep_msg->status;
param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
@@ -2465,38 +2466,41 @@
struct cm_work *work = data;
int ret;
- switch (work->mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
- case CM_REQ_ATTR_ID:
+ switch (work->cm_event.event) {
+ case IB_CM_REQ_RECEIVED:
ret = cm_req_handler(work);
break;
- case CM_MRA_ATTR_ID:
+ case IB_CM_MRA_RECEIVED:
ret = cm_mra_handler(work);
break;
- case CM_REJ_ATTR_ID:
+ case IB_CM_REJ_RECEIVED:
ret = cm_rej_handler(work);
break;
- case CM_REP_ATTR_ID:
+ case IB_CM_REP_RECEIVED:
ret = cm_rep_handler(work);
break;
- case CM_RTU_ATTR_ID:
+ case IB_CM_RTU_RECEIVED:
ret = cm_rtu_handler(work);
break;
- case CM_DREQ_ATTR_ID:
+ case IB_CM_USER_ESTABLISHED:
+ ret = cm_establish_handler(work);
+ break;
+ case IB_CM_DREQ_RECEIVED:
ret = cm_dreq_handler(work);
break;
- case CM_DREP_ATTR_ID:
+ case IB_CM_DREP_RECEIVED:
ret = cm_drep_handler(work);
break;
- case CM_SIDR_REQ_ATTR_ID:
+ case IB_CM_SIDR_REQ_RECEIVED:
ret = cm_sidr_req_handler(work);
break;
- case CM_SIDR_REP_ATTR_ID:
+ case IB_CM_SIDR_REP_RECEIVED:
ret = cm_sidr_rep_handler(work);
break;
- case CM_LAP_ATTR_ID:
+ case IB_CM_LAP_RECEIVED:
ret = cm_lap_handler(work);
break;
- case CM_APR_ATTR_ID:
+ case IB_CM_APR_RECEIVED:
ret = cm_apr_handler(work);
break;
default:
@@ -2507,23 +2511,102 @@
cm_free_work(work);
}
+int ib_cm_establish(struct ib_cm_id *cm_id)
+{
+ struct cm_id_private *cm_id_priv;
+ struct cm_work *work;
+ unsigned long flags;
+ int ret = 0;
+
+ work = kmalloc(sizeof *work, (in_atomic() || irqs_disabled()) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id->state)
+ {
+ case IB_CM_REP_SENT:
+ case IB_CM_MRA_REP_RCVD:
+ cm_id->state = IB_CM_ESTABLISHED;
+ break;
+ case IB_CM_ESTABLISHED:
+ ret = -EISCONN;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ if (ret) {
+ kfree(work);
+ goto out;
+ }
+
+ /*
+ * The CM worker thread may try to destroy the cm_id before it
+ * can execute this work item. To prevent potential deadlock,
+ * we need to find the cm_id once we're in the context of the
+ * worker thread, rather than holding a reference on it.
+ */
+ INIT_WORK(&work->work, cm_work_handler, work);
+ work->local_id = cm_id->local_id;
+ work->cm_event.event = IB_CM_USER_ESTABLISHED;
+ queue_work(cm.wq, &work->work);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(ib_cm_establish);
+
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct cm_work *work;
- int paths;
+ enum ib_cm_event_type event;
+ int paths = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
alt_local_lid != 0);
+ event = IB_CM_REQ_RECEIVED;
+ break;
+ case CM_MRA_ATTR_ID:
+ event = IB_CM_MRA_RECEIVED;
+ break;
+ case CM_REJ_ATTR_ID:
+ event = IB_CM_REJ_RECEIVED;
+ break;
+ case CM_REP_ATTR_ID:
+ event = IB_CM_REP_RECEIVED;
+ break;
+ case CM_RTU_ATTR_ID:
+ event = IB_CM_RTU_RECEIVED;
+ break;
+ case CM_DREQ_ATTR_ID:
+ event = IB_CM_DREQ_RECEIVED;
+ break;
+ case CM_DREP_ATTR_ID:
+ event = IB_CM_DREP_RECEIVED;
+ break;
+ case CM_SIDR_REQ_ATTR_ID:
+ event = IB_CM_SIDR_REQ_RECEIVED;
+ break;
+ case CM_SIDR_REP_ATTR_ID:
+ event = IB_CM_SIDR_REP_RECEIVED;
break;
case CM_LAP_ATTR_ID:
paths = 1;
+ event = IB_CM_LAP_RECEIVED;
break;
- default:
- paths = 0;
+ case CM_APR_ATTR_ID:
+ event = IB_CM_APR_RECEIVED;
break;
+ default:
+ ib_free_recv_mad(mad_recv_wc);
+ return;
}
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
@@ -2534,6 +2617,7 @@
}
INIT_WORK(&work->work, cm_work_handler, work);
+ work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
queue_work(cm.wq, &work->work);
More information about the general
mailing list