[openib-general] [PATCH] CM: serialize callbacks to a single client
Sean Hefty
mshefty at ichips.intel.com
Sat Jan 22 15:28:11 PST 2005
Hopefully, this patch serializes all callbacks to a single CM client: events for a given cm_id are queued on the cm_id and dispatched one at a time, so a client never receives concurrent callbacks for the same connection.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
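For reference, the mechanism is a per-cm_id work list guarded by an atomic counter that starts at -1. Whichever thread's increment carries the counter to 0 becomes the dispatcher and delivers its event directly; any thread that increments past 0 just queues its work under the cm_id lock. The dispatcher then drains the queue until a decrement takes the counter back below 0. Below is a rough userspace sketch of that counting scheme, with C11 atomics and a pthread mutex standing in for the kernel primitives; the names (post_event, deliver, struct event) are illustrative only, and the handler-return/destroy path of cm_process_work is omitted:

/*
 * Userspace sketch of the work_count scheme -- NOT the kernel code.
 * The counter starts at -1; the thread whose increment brings it to 0
 * owns delivery, and it drains the queue until its decrement takes
 * the counter negative again.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct event {
	struct event *next;
	int data;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *head, **tail = &head;
static atomic_int work_count = -1;	/* -1 == idle */

static void deliver(struct event *e)	/* stand-in for the client callback */
{
	printf("event %d\n", e->data);
}

static struct event *dequeue(void)
{
	struct event *e;

	pthread_mutex_lock(&lock);
	e = head;
	if (e && !(head = e->next))
		tail = &head;		/* queue now empty */
	pthread_mutex_unlock(&lock);
	return e;
}

void post_event(struct event *e)
{
	pthread_mutex_lock(&lock);
	if (atomic_fetch_add(&work_count, 1) + 1 != 0) {
		/* A dispatcher is active: queue and let it deliver. */
		e->next = NULL;
		*tail = e;
		tail = &e->next;
		pthread_mutex_unlock(&lock);
		return;
	}
	pthread_mutex_unlock(&lock);

	deliver(e);	/* our increment took the counter from -1 to 0 */
	/*
	 * Drain: continue while the decremented counter is still >= 0.
	 * The counter guarantees dequeue() finds an event here (the
	 * patch asserts the same with BUG_ON(!work)).
	 */
	while (atomic_fetch_sub(&work_count, 1) - 1 >= 0)
		deliver(dequeue());
}

int main(void)
{
	struct event a = { .data = 1 }, b = { .data = 2 };

	post_event(&a);
	post_event(&b);
	return 0;
}

The invariant is that work_count equals the number of outstanding events minus one, so the -1 to 0 transition hands exclusive dispatch rights to exactly one thread; that is what lets the handlers below drop the cm_id lock before calling back into the client. Note that, as in the patch, the increment and the enqueue happen under the same lock as each other, which is why the dispatcher's locked dequeue can never come up empty.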
Index: core/cm.c
===================================================================
--- core/cm.c (revision 1624)
+++ core/cm.c (working copy)
@@ -111,10 +111,14 @@ struct cm_id_private {
u8 max_cm_retries;
u8 passive;
u8 peer_to_peer;
+
+ struct list_head work_list;
+ atomic_t work_count;
};
-struct cm_recv_work {
+struct cm_work {
struct work_struct work;
+ struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc;
struct ib_cm_event cm_event;
@@ -505,6 +509,8 @@ struct ib_cm_id *ib_create_cm_id(ib_cm_h
spin_lock_init(&cm_id_priv->lock);
init_waitqueue_head(&cm_id_priv->wait);
+ INIT_LIST_HEAD(&cm_id_priv->work_list);
+ atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@@ -514,9 +520,28 @@ error:
}
EXPORT_SYMBOL(ib_create_cm_id);
+static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
+{
+ struct cm_work *work;
+
+ if (list_empty(&cm_id_priv->work_list))
+ return NULL;
+
+ work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
+ list_del(&work->list);
+ return work;
+}
+
+static void cm_free_work(struct cm_work *work)
+{
+ ib_free_recv_mad(work->mad_recv_wc);
+ kfree(work);
+}
+
int ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
+ struct cm_work *work;
unsigned long flags;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -575,6 +600,8 @@ retest:
cm_free_id(cm_id->local_id);
atomic_dec(&cm_id_priv->refcount);
wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+ while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
+ cm_free_work(work);
kfree(cm_id_priv);
return 0;
}
@@ -823,7 +850,7 @@ static inline void cm_format_paths_from_
primary_path->packet_life_time_selector = IB_SA_EQ;
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
-
+
if (alt_path) {
memset(alt_path, 0, sizeof *alt_path);
alt_path->dgid = req_msg->alt_local_gid;
@@ -846,22 +873,81 @@ static inline void cm_format_paths_from_
}
}
-static void cm_req_handler(struct cm_recv_work *recv_work)
+static void cm_format_req_event(struct cm_work *work,
+ struct ib_cm_id *listen_id)
+{
+ struct cm_req_msg *req_msg;
+ struct ib_cm_req_event_param *param;
+
+ req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
+ work->cm_event.event = IB_CM_REQ_RECEIVED;
+ param = &work->cm_event.param.req_rcvd;
+ param->listen_id = listen_id;
+ param->device = work->port->mad_agent->device;
+ param->port = work->port->port_num;
+ param->primary_path = &work->path[0];
+ if (req_msg->alt_local_lid)
+ param->alternate_path = &work->path[1];
+ else
+ param->alternate_path = NULL;
+ cm_format_paths_from_req(param->primary_path, param->alternate_path,
+ req_msg);
+ param->remote_ca_guid = req_msg->local_ca_guid;
+ param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
+ param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
+ param->qp_type = cm_req_get_qp_type(req_msg);
+ param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
+ param->responder_resources = cm_req_get_resp_res(req_msg);
+ param->initiator_depth = cm_req_get_init_depth(req_msg);
+ param->local_cm_response_timeout =
+ cm_req_get_remote_resp_timeout(req_msg);
+ param->flow_control = cm_req_get_flow_ctrl(req_msg);
+ param->remote_cm_response_timeout =
+ cm_req_get_local_resp_timeout(req_msg);
+ param->retry_count = cm_req_get_retry_count(req_msg);
+ param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
+ param->srq = cm_req_get_srq(req_msg);
+ work->cm_event.private_data = &req_msg->private_data;
+}
+
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work)
+{
+ unsigned long flags;
+ int ret;
+
+ /* We will typically only have the current event to report. */
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
+ cm_free_work(work);
+
+ while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ work = cm_dequeue_work(cm_id_priv);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ BUG_ON(!work);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
+ &work->cm_event);
+ cm_free_work(work);
+ }
+ cm_deref_id(cm_id_priv);
+ if (ret)
+ ib_destroy_cm_id(&cm_id_priv->id);
+}
+
+static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_req_msg *req_msg;
unsigned long flags;
- struct ib_cm_req_event_param *param;
struct ib_wc *wc;
- int ret;
cm_id = ib_create_cm_id(NULL, NULL);
if (IS_ERR(cm_id))
- return;
+ return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- req_msg = (struct cm_req_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
cm_id_priv->id.remote_id = req_msg->local_comm_id;
cm_id_priv->passive = 1;
@@ -878,6 +964,9 @@ static void cm_req_handler(struct cm_rec
cur_cm_id_priv = cm_find_listen(req_msg->service_id);
if (cur_cm_id_priv) {
atomic_inc(&cur_cm_id_priv->refcount);
+ atomic_inc(&cm_id_priv->refcount);
+ cm_id_priv->id.state = IB_CM_REQ_RCVD;
+ atomic_inc(&cm_id_priv->work_count);
cm_insert_remote_id(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -885,7 +974,6 @@ static void cm_req_handler(struct cm_rec
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = ~0ULL;
- cm_id_priv->id.state = IB_CM_REQ_RCVD;
} else {
/* Search for a peer request. */
/* todo: fix peer-to-peer */
@@ -894,7 +982,7 @@ static void cm_req_handler(struct cm_rec
/* todo: reject with no match */
goto out;
}
- if (cm_is_active_peer(recv_work->port->ca_guid,
+ if (cm_is_active_peer(work->port->ca_guid,
req_msg->local_ca_guid,
cur_cm_id_priv->local_qpn,
cm_req_get_local_qpn(req_msg))) {
@@ -910,56 +998,29 @@ static void cm_req_handler(struct cm_rec
cm_insert_remote_id(cur_cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
- ib_cancel_mad(recv_work->port->mad_agent,
+ ib_cancel_mad(work->port->mad_agent,
(unsigned long) cur_cm_id_priv->msg);
ib_destroy_cm_id(&cm_id_priv->id);
cm_id_priv = cur_cm_id_priv;
}
- cm_id_priv->port = recv_work->port;
+ cm_id_priv->port = work->port;
cm_id_priv->timeout_ms = cm_convert_to_ms(
cm_req_get_local_resp_timeout(req_msg));
cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->remote_port_gid = req_msg->primary_local_gid;
- wc = recv_work->mad_recv_wc->wc;
+ wc = work->mad_recv_wc->wc;
cm_id_priv->pkey_index = wc->pkey_index;
- cm_set_ah_attr(&cm_id_priv->ah_attr, recv_work->port->port_num,
+ cm_set_ah_attr(&cm_id_priv->ah_attr, work->port->port_num,
wc->slid, wc->sl, wc->dlid_path_bits);
- recv_work->cm_event.event = IB_CM_REQ_RECEIVED;
- param = &recv_work->cm_event.param.req_rcvd;
- param->listen_id = &cur_cm_id_priv->id;
- param->device = recv_work->port->mad_agent->device;
- param->port = recv_work->port->port_num;
- param->primary_path = &recv_work->path[0];
- if (req_msg->alt_local_lid)
- param->alternate_path = &recv_work->path[1];
- else
- param->alternate_path = NULL;
- cm_format_paths_from_req(param->primary_path, param->alternate_path,
- req_msg);
- param->remote_ca_guid = req_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
- param->remote_qpn = be32_to_cpu(cm_id_priv->remote_qpn);
- param->qp_type = cm_req_get_qp_type(req_msg);
- param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
- param->responder_resources = cm_req_get_resp_res(req_msg);
- param->initiator_depth = cm_req_get_init_depth(req_msg);
- param->local_cm_response_timeout =
- cm_req_get_remote_resp_timeout(req_msg);
- param->flow_control = cm_req_get_flow_ctrl(req_msg);
- param->remote_cm_response_timeout =
- cm_req_get_local_resp_timeout(req_msg);
- param->retry_count = cm_req_get_retry_count(req_msg);
- param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
- param->srq = cm_req_get_srq(req_msg);
- recv_work->cm_event.private_data = &req_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
+ cm_format_req_event(work, &cur_cm_id_priv->id);
+ cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
- if (!ret)
- return;
+ return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
+ return -EINVAL;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
@@ -1120,18 +1181,42 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_rtu);
-static void cm_rep_handler(struct cm_recv_work *recv_work)
+static void cm_format_rep_event(struct cm_work *work)
{
- struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
struct ib_cm_rep_event_param *param;
+
+ rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
+ work->cm_event.event = IB_CM_REP_RECEIVED;
+ param = &work->cm_event.param.rep_rcvd;
+ param->remote_ca_guid = rep_msg->local_ca_guid;
+ param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
+ param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
+ param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
+ param->responder_resources = rep_msg->resp_resources;
+ param->initiator_depth = rep_msg->initiator_depth;
+ param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
+ param->failover_accepted = cm_rep_get_failover(rep_msg);
+ param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
+ param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
+ param->srq = cm_rep_get_srq(rep_msg);
+ work->cm_event.private_data = &rep_msg->private_data;
+}
+
+static int cm_rep_handler(struct cm_work *work)
+{
+ struct cm_id_private *cm_id_priv;
+ struct cm_rep_msg *rep_msg;
unsigned long flags;
+ u64 wr_id;
int ret;
- rep_msg = (struct cm_rep_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(rep_msg->remote_comm_id);
if (!cm_id_priv)
- return;
+ return -EINVAL;
+
+ cm_format_rep_event(work);
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
@@ -1147,10 +1232,9 @@ static void cm_rep_handler(struct cm_rec
goto out;
}
cm_id_priv->id.state = IB_CM_REP_RCVD;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-
- ib_cancel_mad(recv_work->port->mad_agent,
- (unsigned long) cm_id_priv->msg);
+ cm_id_priv->id.remote_id = rep_msg->local_comm_id;
+ cm_id_priv->remote_ca_guid = rep_msg->local_ca_guid;
+ cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
/* todo: handle peer_to_peer
if (cm_id_priv->peer_to_peer) {
@@ -1161,44 +1245,38 @@ static void cm_rep_handler(struct cm_rec
}
*/
- cm_id_priv->id.remote_id = rep_msg->local_comm_id;
- cm_id_priv->remote_ca_guid = rep_msg->local_ca_guid;
- cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+ wr_id = (unsigned long) cm_id_priv->msg;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- recv_work->cm_event.event = IB_CM_REP_RECEIVED;
- param = &recv_work->cm_event.param.rep_rcvd;
- param->remote_ca_guid = rep_msg->local_ca_guid;
- param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
- param->remote_qpn = be32_to_cpu(cm_id_priv->remote_qpn);
- param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
- param->responder_resources = rep_msg->resp_resources;
- param->initiator_depth = rep_msg->initiator_depth;
- param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
- param->failover_accepted = cm_rep_get_failover(rep_msg);
- param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
- param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
- param->srq = cm_rep_get_srq(rep_msg);
- recv_work->cm_event.private_data = &rep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
- if (ret) {
- atomic_dec(&cm_id_priv->refcount);
- ib_destroy_cm_id(&cm_id_priv->id);
- return;
- }
+ ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
-static void cm_rtu_handler(struct cm_recv_work *recv_work)
+static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
unsigned long flags;
+ u64 wr_id;
+ int ret;
- rtu_msg = (struct cm_rtu_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(rtu_msg->remote_comm_id);
if (!cm_id_priv)
- return;
+ return -EINVAL;
+
+ work->cm_event.event = IB_CM_RTU_RECEIVED;
+ work->cm_event.private_data = &rtu_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
@@ -1207,16 +1285,22 @@ static void cm_rtu_handler(struct cm_rec
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(recv_work->port->mad_agent,
- (unsigned long) cm_id_priv->msg);
+ wr_id = (unsigned long) cm_id_priv->msg;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- recv_work->cm_event.event = IB_CM_RTU_RECEIVED;
- recv_work->cm_event.private_data = &rtu_msg->private_data;
- cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
+ ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
int ib_cm_establish(struct ib_cm_id *cm_id)
@@ -1382,17 +1466,20 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_drep);
-static void cm_dreq_handler(struct cm_recv_work *recv_work)
+static int cm_dreq_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
unsigned long flags;
int ret;
- dreq_msg = (struct cm_dreq_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(dreq_msg->remote_comm_id);
if (!cm_id_priv)
- return;
+ return -EINVAL;
+
+ work->cm_event.event = IB_CM_DREQ_RECEIVED;
+ work->cm_event.private_data = &dreq_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) {
@@ -1414,31 +1501,36 @@ static void cm_dreq_handler(struct cm_re
goto out;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- recv_work->cm_event.event = IB_CM_DREQ_RECEIVED;
- recv_work->cm_event.private_data = &dreq_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
- if (ret) {
- atomic_dec(&cm_id_priv->refcount);
- ib_destroy_cm_id(&cm_id_priv->id);
- return;
- }
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
-static void cm_drep_handler(struct cm_recv_work *recv_work)
+static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
unsigned long flags;
+ u64 wr_id;
int ret;
- drep_msg = (struct cm_drep_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(drep_msg->remote_comm_id);
if (!cm_id_priv)
- return;
+ return -EINVAL;
+
+ work->cm_event.event = IB_CM_DREP_RECEIVED;
+ work->cm_event.private_data = &drep_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT) {
@@ -1446,24 +1538,25 @@ static void cm_drep_handler(struct cm_re
goto out;
}
cm_id_priv->id.state = IB_CM_TIMEWAIT;
+
+ wr_id = (unsigned long) cm_id_priv->msg;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
if (cm_id_priv->passive)
cm_remove_remote_id(cm_id_priv);
- ib_cancel_mad(recv_work->port->mad_agent,
- (unsigned long) cm_id_priv->msg);
-
- recv_work->cm_event.event = IB_CM_DREP_RECEIVED;
- recv_work->cm_event.private_data = &drep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
- if (ret) {
- atomic_dec(&cm_id_priv->refcount);
- ib_destroy_cm_id(&cm_id_priv->id);
- return;
- }
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
@@ -1560,9 +1653,10 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_rej);
-static void cm_rej_handler(struct cm_recv_work *recv_work)
+static int cm_rej_handler(struct cm_work *work)
{
/* todo: write reject handler */
+ return -EINVAL;
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
@@ -1665,11 +1759,12 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_mra);
-static void cm_mra_handler(struct cm_recv_work *recv_work)
+static int cm_mra_handler(struct cm_work *work)
{
/* todo: write MRA handler */
/* todo: add timeout mechanism separate from retries for
receiver of MRA */
+ return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
@@ -1771,7 +1866,7 @@ static void cm_format_path_from_lap(stru
path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
}
-static void cm_lap_handler(struct cm_recv_work *recv_work)
+static int cm_lap_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_lap_msg *lap_msg;
@@ -1780,10 +1875,16 @@ static void cm_lap_handler(struct cm_rec
int ret;
/* todo: verify LAP request and send reject APR if invalid. */
- lap_msg = (struct cm_lap_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(lap_msg->remote_comm_id);
if (!cm_id_priv)
- return;
+ return -EINVAL;
+
+ work->cm_event.event = IB_CM_LAP_RECEIVED;
+ param = &work->cm_event.param.lap_rcvd;
+ param->alternate_path = &work->path[0];
+ cm_format_path_from_lap(param->alternate_path, lap_msg);
+ work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED &&
@@ -1792,21 +1893,19 @@ static void cm_lap_handler(struct cm_rec
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- recv_work->cm_event.event = IB_CM_LAP_RECEIVED;
- param = &recv_work->cm_event.param.lap_rcvd;
- param->alternate_path = &recv_work->path[0];
- cm_format_path_from_lap(param->alternate_path, lap_msg);
- recv_work->cm_event.private_data = &lap_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
- if (ret) {
- atomic_dec(&cm_id_priv->refcount);
- ib_destroy_cm_id(&cm_id_priv->id);
- return;
- }
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
@@ -1879,17 +1978,24 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_apr);
-static void cm_apr_handler(struct cm_recv_work *recv_work)
+static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- struct cm_msg *msg;
unsigned long flags;
+ u64 wr_id;
+ int ret;
- apr_msg = (struct cm_apr_msg *)recv_work->mad_recv_wc->recv_buf.mad;
+ apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(apr_msg->remote_comm_id);
if (!cm_id_priv)
- return; /* Unmatched reply. */
+ return -EINVAL; /* Unmatched reply. */
+
+ work->cm_event.event = IB_CM_APR_RECEIVED;
+ work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
+ work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
+ work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
+ work->cm_event.private_data = &apr_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
@@ -1899,21 +2005,24 @@ static void cm_apr_handler(struct cm_rec
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
- msg = cm_id_priv->msg;
+ wr_id = (unsigned long) cm_id_priv->msg;
cm_id_priv->msg = NULL;
+
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(recv_work->port->mad_agent,
- (unsigned long) msg);
+ ib_cancel_mad(cm_id_priv->port->mad_agent, wr_id);
- recv_work->cm_event.event = IB_CM_APR_RECEIVED;
- recv_work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
- recv_work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
- recv_work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
- recv_work->cm_event.private_data = &apr_msg->private_data;
- cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
+ if (ret)
+ cm_process_work(cm_id_priv, work);
+ else
+ cm_deref_id(cm_id_priv);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
@@ -1992,28 +2101,45 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
-static void cm_sidr_req_handler(struct cm_recv_work *recv_work)
+static void cm_format_sidr_req_event(struct cm_work *work,
+ struct ib_cm_id *listen_id)
+{
+ struct cm_sidr_req_msg *sidr_req_msg;
+ struct ib_cm_sidr_req_event_param *param;
+
+ sidr_req_msg = (struct cm_sidr_req_msg *)
+ work->mad_recv_wc->recv_buf.mad;
+ work->cm_event.event = IB_CM_SIDR_REQ_RECEIVED;
+ param = &work->cm_event.param.sidr_req_rcvd;
+ param->pkey = sidr_req_msg->pkey;
+ param->listen_id = listen_id;
+ param->device = work->port->mad_agent->device;
+ param->port = work->port->port_num;
+ work->cm_event.private_data = &sidr_req_msg->private_data;
+}
+
+static int cm_sidr_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
- struct ib_cm_sidr_req_event_param *param;
struct ib_wc *wc;
unsigned long flags;
- int ret;
cm_id = ib_create_cm_id(NULL, NULL);
if (IS_ERR(cm_id))
- return;
+ return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
- recv_work->mad_recv_wc->recv_buf.mad;
- wc = recv_work->mad_recv_wc->wc;
+ work->mad_recv_wc->recv_buf.mad;
+ wc = work->mad_recv_wc->wc;
cm_id_priv->remote_port_gid.global.subnet_prefix = wc->slid;
cm_id_priv->remote_port_gid.global.interface_id = 0;
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
+ cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
+ atomic_inc(&cm_id_priv->work_count);
spin_lock_irqsave(&cm.lock, flags);
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
@@ -2035,25 +2161,18 @@ static void cm_sidr_req_handler(struct c
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~0ULL;
- cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cm_id_priv->port = recv_work->port;
+ cm_id_priv->port = work->port;
cm_id_priv->pkey_index = wc->pkey_index;
- cm_set_ah_attr(&cm_id_priv->ah_attr, recv_work->port->port_num,
+ cm_set_ah_attr(&cm_id_priv->ah_attr, work->port->port_num,
wc->slid, wc->sl, wc->dlid_path_bits);
- recv_work->cm_event.event = IB_CM_SIDR_REQ_RECEIVED;
- param = &recv_work->cm_event.param.sidr_req_rcvd;
- param->pkey = sidr_req_msg->pkey;
- param->listen_id = &cur_cm_id_priv->id;
- param->device = recv_work->port->mad_agent->device;
- param->port = recv_work->port->port_num;
- recv_work->cm_event.private_data = &sidr_req_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
+ cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
+ cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
- if (!ret)
- return;
+ return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
+ return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
@@ -2122,19 +2241,34 @@ out:
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
-static void cm_sidr_rep_handler(struct cm_recv_work *recv_work)
+static void cm_format_sidr_rep_event(struct cm_work *work)
{
- struct cm_id_private *cm_id_priv;
struct cm_sidr_rep_msg *sidr_rep_msg;
struct ib_cm_sidr_rep_event_param *param;
+
+ sidr_rep_msg = (struct cm_sidr_rep_msg *)
+ work->mad_recv_wc->recv_buf.mad;
+ work->cm_event.event = IB_CM_SIDR_REP_RECEIVED;
+ param = &work->cm_event.param.sidr_rep_rcvd;
+ param->status = sidr_rep_msg->status;
+ param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
+ param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
+ param->info = &sidr_rep_msg->info;
+ param->info_len = sidr_rep_msg->info_length;
+ work->cm_event.private_data = &sidr_rep_msg->private_data;
+}
+
+static int cm_sidr_rep_handler(struct cm_work *work)
+{
+ struct cm_sidr_rep_msg *sidr_rep_msg;
+ struct cm_id_private *cm_id_priv;
unsigned long flags;
- int ret;
sidr_rep_msg = (struct cm_sidr_rep_msg *)
- recv_work->mad_recv_wc->recv_buf.mad;
+ work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(sidr_rep_msg->request_id);
if (!cm_id_priv)
- return; /* Unmatched reply. */
+ return -EINVAL; /* Unmatched reply. */
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
@@ -2144,25 +2278,14 @@ static void cm_sidr_rep_handler(struct c
cm_id_priv->id.state = IB_CM_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(recv_work->port->mad_agent,
+ ib_cancel_mad(cm_id_priv->port->mad_agent,
(unsigned long) cm_id_priv->msg);
-
- recv_work->cm_event.event = IB_CM_SIDR_REP_RECEIVED;
- param = &recv_work->cm_event.param.sidr_rep_rcvd;
- param->status = sidr_rep_msg->status;
- param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
- param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
- param->info = &sidr_rep_msg->info;
- param->info_len = sidr_rep_msg->info_length;
- recv_work->cm_event.private_data = &sidr_rep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
- if (ret) {
- atomic_dec(&cm_id_priv->refcount);
- ib_destroy_cm_id(&cm_id_priv->id);
- return;
- }
+ cm_format_sidr_rep_event(work);
+ cm_process_work(cm_id_priv, work);
+ return 0;
out:
cm_deref_id(cm_id_priv);
+ return -EINVAL;
}
static void cm_process_send_error(struct cm_msg *msg,
@@ -2261,55 +2384,57 @@ static void cm_send_handler(struct ib_ma
}
}
-static void cm_recv_work_handler(void *data)
+static void cm_work_handler(void *data)
{
- struct cm_recv_work *recv_work = data;
+ struct cm_work *work = data;
+ int ret;
- switch (recv_work->mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
+ switch (work->mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
- cm_req_handler(recv_work);
+ ret = cm_req_handler(work);
break;
case CM_MRA_ATTR_ID:
- cm_mra_handler(recv_work);
+ ret = cm_mra_handler(work);
break;
case CM_REJ_ATTR_ID:
- cm_rej_handler(recv_work);
+ ret = cm_rej_handler(work);
break;
case CM_REP_ATTR_ID:
- cm_rep_handler(recv_work);
+ ret = cm_rep_handler(work);
break;
case CM_RTU_ATTR_ID:
- cm_rtu_handler(recv_work);
+ ret = cm_rtu_handler(work);
break;
case CM_DREQ_ATTR_ID:
- cm_dreq_handler(recv_work);
+ ret = cm_dreq_handler(work);
break;
case CM_DREP_ATTR_ID:
- cm_drep_handler(recv_work);
+ ret = cm_drep_handler(work);
break;
case CM_SIDR_REQ_ATTR_ID:
- cm_sidr_req_handler(recv_work);
+ ret = cm_sidr_req_handler(work);
break;
case CM_SIDR_REP_ATTR_ID:
- cm_sidr_rep_handler(recv_work);
+ ret = cm_sidr_rep_handler(work);
break;
case CM_LAP_ATTR_ID:
- cm_lap_handler(recv_work);
+ ret = cm_lap_handler(work);
break;
case CM_APR_ATTR_ID:
- cm_apr_handler(recv_work);
+ ret = cm_apr_handler(work);
break;
default:
+ ret = -EINVAL;
break;
}
- ib_free_recv_mad(recv_work->mad_recv_wc);
- kfree(recv_work);
+ if (ret)
+ cm_free_work(work);
}
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
- struct cm_recv_work *recv_work;
+ struct cm_work *work;
int paths;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
@@ -2325,17 +2450,17 @@ static void cm_recv_handler(struct ib_ma
break;
}
- recv_work = kmalloc(sizeof *recv_work + sizeof(struct ib_sa_path_rec) *
- paths, GFP_KERNEL);
- if (!recv_work) {
+ work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+ GFP_KERNEL);
+ if (!work) {
ib_free_recv_mad(mad_recv_wc);
return;
}
- INIT_WORK(&recv_work->work, cm_recv_work_handler, recv_work);
- recv_work->mad_recv_wc = mad_recv_wc;
- recv_work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &recv_work->work);
+ INIT_WORK(&work->work, cm_work_handler, work);
+ work->mad_recv_wc = mad_recv_wc;
+ work->port = (struct cm_port *)mad_agent->context;
+ queue_work(cm.wq, &work->work);
}
static u64 cm_get_ca_guid(struct ib_device *device)