[openib-general] [PATCH] CM: combine allocation of work item and event structures
Sean Hefty
mshefty at ichips.intel.com
Tue Jan 18 19:23:47 PST 2005
This patch combines the allocation of receive work queue items and the
corresponding event reporting structure into a single allocation.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
Index: core/cm.c
===================================================================
--- core/cm.c (revision 1569)
+++ core/cm.c (working copy)
@@ -115,8 +115,8 @@
struct work_struct work;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc;
- /* todo: allocate event when allocating work structure */
- /* struct ib_cm_event cm_event; */
+ struct ib_cm_event cm_event;
+ struct ib_sa_path_rec path[];
};
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
@@ -835,31 +835,22 @@
cm_req_get_alt_local_ack_timeout(req_msg);
}
-static void cm_req_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_req_handler(struct cm_recv_work *recv_work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_req_msg *req_msg;
- struct ib_cm_event *cm_event;
unsigned long flags, flags2;
struct ib_cm_req_event_param *param;
struct ib_wc *wc;
int ret;
- cm_event = kmalloc(sizeof *cm_event +
- sizeof *cm_event->param.req_rcvd.primary_path +
- sizeof *cm_event->param.req_rcvd.alternate_path,
- GFP_KERNEL);
- if (!cm_event)
- return;
-
cm_id = ib_create_cm_id(NULL, NULL);
if (IS_ERR(cm_id))
- goto out;
+ return;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- req_msg = (struct cm_req_msg *)mad_recv_wc->recv_buf.mad;
+ req_msg = (struct cm_req_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv->remote_ca_guid = req_msg->local_ca_guid;
cm_id_priv->id.remote_id = req_msg->local_comm_id;
@@ -869,14 +860,14 @@
req_msg->local_comm_id);
if (cur_cm_id_priv) {
spin_unlock_irqrestore(&cm.lock, flags);
- goto destroy_id; /* Duplicate message. */
+ goto out; /* Duplicate message. */
}
/* Find matching listen/peer request. */
cur_cm_id_priv = cm_find_service(req_msg->service_id);
if (!cur_cm_id_priv) {
spin_unlock_irqrestore(&cm.lock, flags);
/* todo: reject with no match */
- goto destroy_id; /* No match. */
+ goto out; /* No match. */
}
spin_lock_irqsave(&cur_cm_id_priv->lock, flags2);
if (cur_cm_id_priv->id.state == IB_CM_LISTEN) {
@@ -892,11 +883,12 @@
cm_id_priv->id.state = IB_CM_REQ_RCVD;
} else {
/* Process peer requests. */
- if (cm_is_active_peer(port->ca_guid, req_msg->local_ca_guid,
+ if (cm_is_active_peer(recv_work->port->ca_guid,
+ req_msg->local_ca_guid,
cur_cm_id_priv->local_qpn,
cm_req_get_local_qpn(req_msg))) {
spin_unlock_irqrestore(&cm.lock, flags);
- goto destroy_id; /* Stay active. */
+ goto out; /* Stay active. */
}
atomic_inc(&cur_cm_id_priv->refcount);
cur_cm_id_priv->id.state = IB_CM_REQ_RCVD;
@@ -907,32 +899,32 @@
cm_insert_remote_id(cur_cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
- ib_cancel_mad(port->mad_agent,
+ ib_cancel_mad(recv_work->port->mad_agent,
(unsigned long) cur_cm_id_priv->msg);
ib_destroy_cm_id(&cm_id_priv->id);
cm_id_priv = cur_cm_id_priv;
}
- cm_id_priv->port = port;
+ cm_id_priv->port = recv_work->port;
cm_id_priv->timeout_ms = cm_convert_to_ms(
cm_req_get_local_resp_timeout(req_msg));
cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->remote_port_gid = req_msg->primary_local_gid;
- wc = mad_recv_wc->wc;
+ wc = recv_work->mad_recv_wc->wc;
cm_id_priv->pkey_index = wc->pkey_index;
- cm_set_ah_attr(&cm_id_priv->ah_attr, port->port_num,
+ cm_set_ah_attr(&cm_id_priv->ah_attr, recv_work->port->port_num,
wc->slid, wc->sl, wc->dlid_path_bits);
- cm_event->event = IB_CM_REQ_RECEIVED;
- param = &cm_event->param.req_rcvd;
+ recv_work->cm_event.event = IB_CM_REQ_RECEIVED;
+ param = &recv_work->cm_event.param.req_rcvd;
param->listen_id = &cur_cm_id_priv->id;
- param->device = port->mad_agent->device;
- param->port = port->port_num;
- param->primary_path = (struct ib_sa_path_rec *)
- (u8*)cm_event + sizeof *cm_event;
- param->alternate_path = (struct ib_sa_path_rec *)
- (u8*)param->primary_path +
- sizeof *param->primary_path;
+ param->device = recv_work->port->mad_agent->device;
+ param->port = recv_work->port->port_num;
+ param->primary_path = &recv_work->path[0];
+ if (req_msg->alt_local_lid)
+ param->alternate_path = &recv_work->path[1];
+ else
+ param->alternate_path = NULL;
cm_format_paths_from_req(param->primary_path, param->alternate_path,
req_msg);
param->remote_ca_guid = req_msg->local_ca_guid;
@@ -942,21 +934,21 @@
param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
param->responder_resources = cm_req_get_resp_res(req_msg);
param->initiator_depth = cm_req_get_init_depth(req_msg);
- param->local_cm_response_timeout = cm_req_get_remote_resp_timeout(req_msg);
+ param->local_cm_response_timeout =
+ cm_req_get_remote_resp_timeout(req_msg);
param->flow_control = cm_req_get_flow_ctrl(req_msg);
- param->remote_cm_response_timeout = cm_req_get_local_resp_timeout(req_msg);
+ param->remote_cm_response_timeout =
+ cm_req_get_local_resp_timeout(req_msg);
param->retry_count = cm_req_get_retry_count(req_msg);
param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
- cm_event->private_data = &req_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.private_data = &req_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
cm_deref_id(cur_cm_id_priv);
if (!ret)
- goto out;
-destroy_id:
- ib_destroy_cm_id(&cm_id_priv->id);
+ return;
out:
- kfree(cm_event);
+ ib_destroy_cm_id(&cm_id_priv->id);
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
@@ -1117,25 +1109,19 @@
}
EXPORT_SYMBOL(ib_send_cm_rtu);
-static void cm_rep_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_rep_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
- struct ib_cm_event *cm_event;
struct ib_cm_rep_event_param *param;
unsigned long flags;
int ret;
- rep_msg = (struct cm_rep_msg *)mad_recv_wc->recv_buf.mad;
+ rep_msg = (struct cm_rep_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(rep_msg->remote_comm_id);
if (!cm_id_priv)
return;
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
@@ -1152,14 +1138,15 @@
cm_id_priv->id.state = IB_CM_REP_RCVD;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(port->mad_agent, (unsigned long) cm_id_priv->msg);
+ ib_cancel_mad(recv_work->port->mad_agent,
+ (unsigned long) cm_id_priv->msg);
cm_id_priv->id.remote_id = rep_msg->local_comm_id;
cm_id_priv->remote_ca_guid = rep_msg->local_ca_guid;
cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
- cm_event->event = IB_CM_REP_RECEIVED;
- param = &cm_event->param.rep_rcvd;
+ recv_work->cm_event.event = IB_CM_REP_RECEIVED;
+ param = &recv_work->cm_event.param.rep_rcvd;
param->remote_ca_guid = rep_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_id_priv->remote_qpn);
@@ -1171,36 +1158,28 @@
param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
param->srq = cm_rep_get_srq(rep_msg);
- cm_event->private_data = &rep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.private_data = &rep_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
if (ret) {
- kfree(cm_event);
atomic_dec(&cm_id_priv->refcount);
ib_destroy_cm_id(&cm_id_priv->id);
return;
}
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
-static void cm_rtu_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_rtu_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- struct ib_cm_event *cm_event;
unsigned long flags;
- rtu_msg = (struct cm_rtu_msg *)mad_recv_wc->recv_buf.mad;
+ rtu_msg = (struct cm_rtu_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(rtu_msg->remote_comm_id);
if (!cm_id_priv)
return;
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
@@ -1210,13 +1189,13 @@
cm_id_priv->id.state = IB_CM_ESTABLISHED;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(port->mad_agent, (unsigned long) cm_id_priv->msg);
+ ib_cancel_mad(recv_work->port->mad_agent,
+ (unsigned long) cm_id_priv->msg);
- cm_event->event = IB_CM_RTU_RECEIVED;
- cm_event->private_data = &rtu_msg->private_data;
- cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_RTU_RECEIVED;
+ recv_work->cm_event.private_data = &rtu_msg->private_data;
+ cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -1307,24 +1286,18 @@
}
EXPORT_SYMBOL(ib_send_cm_dreq);
-static void cm_dreq_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_dreq_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
- struct ib_cm_event *cm_event;
unsigned long flags;
int ret;
- dreq_msg = (struct cm_dreq_msg *)mad_recv_wc->recv_buf.mad;
+ dreq_msg = (struct cm_dreq_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(dreq_msg->remote_comm_id);
if (!cm_id_priv)
return;
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) {
@@ -1335,17 +1308,15 @@
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_event->event = IB_CM_DREQ_RECEIVED;
- cm_event->private_data = &dreq_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_DREQ_RECEIVED;
+ recv_work->cm_event.private_data = &dreq_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
if (ret) {
- kfree(cm_event);
atomic_dec(&cm_id_priv->refcount);
ib_destroy_cm_id(&cm_id_priv->id);
return;
}
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -1405,24 +1376,18 @@
}
EXPORT_SYMBOL(ib_send_cm_drep);
-static void cm_drep_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_drep_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- struct ib_cm_event *cm_event;
unsigned long flags;
int ret;
- drep_msg = (struct cm_drep_msg *)mad_recv_wc->recv_buf.mad;
+ drep_msg = (struct cm_drep_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(drep_msg->remote_comm_id);
if (!cm_id_priv)
return;
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1431,19 +1396,18 @@
cm_id_priv->id.state = IB_CM_TIMEWAIT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(port->mad_agent, (unsigned long) cm_id_priv->msg);
+ ib_cancel_mad(recv_work->port->mad_agent,
+ (unsigned long) cm_id_priv->msg);
- cm_event->event = IB_CM_DREP_RECEIVED;
- cm_event->private_data = &drep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_DREP_RECEIVED;
+ recv_work->cm_event.private_data = &drep_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
if (ret) {
- kfree(cm_event);
atomic_dec(&cm_id_priv->refcount);
ib_destroy_cm_id(&cm_id_priv->id);
return;
}
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -1535,8 +1499,7 @@
}
EXPORT_SYMBOL(ib_send_cm_rej);
-static void cm_rej_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_rej_handler(struct cm_recv_work *recv_work)
{
/* todo: write reject handler */
}
@@ -1641,8 +1604,7 @@
}
EXPORT_SYMBOL(ib_send_cm_mra);
-static void cm_mra_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_mra_handler(struct cm_recv_work *recv_work)
{
/* todo: write MRA handler */
/* todo: add timeout mechanism separate from retries for
@@ -1748,27 +1710,20 @@
path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
}
-static void cm_lap_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_lap_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_lap_msg *lap_msg;
- struct ib_cm_event *cm_event;
+ struct ib_cm_lap_event_param *param;
unsigned long flags;
int ret;
/* todo: verify LAP request and send reject APR if invalid. */
- lap_msg = (struct cm_lap_msg *)mad_recv_wc->recv_buf.mad;
+ lap_msg = (struct cm_lap_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(lap_msg->remote_comm_id);
if (!cm_id_priv)
return;
- cm_event = kmalloc(sizeof *cm_event +
- sizeof *(cm_event->param.lap_rcvd.alternate_path),
- GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED &&
cm_id_priv->id.lap_state != IB_CM_LAP_IDLE) {
@@ -1778,22 +1733,18 @@
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- cm_event->event = IB_CM_LAP_RECEIVED;
- cm_event->param.lap_rcvd.alternate_path = (struct ib_sa_path_rec *)
- (u8*)cm_event +
- sizeof *cm_event;
- cm_format_path_from_lap(cm_event->param.lap_rcvd.alternate_path,
- lap_msg);
- cm_event->private_data = &lap_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_LAP_RECEIVED;
+ param = &recv_work->cm_event.param.lap_rcvd;
+ param->alternate_path = &recv_work->path[0];
+ cm_format_path_from_lap(param->alternate_path, lap_msg);
+ recv_work->cm_event.private_data = &lap_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
if (ret) {
- kfree(cm_event);
atomic_dec(&cm_id_priv->refcount);
ib_destroy_cm_id(&cm_id_priv->id);
return;
}
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -1868,24 +1819,18 @@
}
EXPORT_SYMBOL(ib_send_cm_apr);
-static void cm_apr_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_apr_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
struct cm_msg *msg;
- struct ib_cm_event *cm_event;
unsigned long flags;
- apr_msg = (struct cm_apr_msg *)mad_recv_wc->recv_buf.mad;
+ apr_msg = (struct cm_apr_msg *)recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(apr_msg->remote_comm_id);
if (!cm_id_priv)
return; /* Unmatched reply. */
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
(cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
@@ -1898,16 +1843,16 @@
cm_id_priv->msg = NULL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(port->mad_agent, (unsigned long) msg);
+ ib_cancel_mad(recv_work->port->mad_agent,
+ (unsigned long) msg);
- cm_event->event = IB_CM_APR_RECEIVED;
- cm_event->param.apr_rcvd.ap_status = apr_msg->ap_status;
- cm_event->param.apr_rcvd.apr_info = &apr_msg->info;
- cm_event->param.apr_rcvd.info_len = apr_msg->info_length;
- cm_event->private_data = &apr_msg->private_data;
- cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_APR_RECEIVED;
+ recv_work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
+ recv_work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
+ recv_work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
+ recv_work->cm_event.private_data = &apr_msg->private_data;
+ cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -1985,29 +1930,25 @@
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
-static void cm_sidr_req_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_sidr_req_handler(struct cm_recv_work *recv_work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
- struct ib_cm_event *cm_event;
+ struct ib_cm_sidr_req_event_param *param;
struct ib_wc *wc;
unsigned long flags;
int ret;
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- return;
-
cm_id = ib_create_cm_id(NULL, NULL);
if (IS_ERR(cm_id))
- goto out;
+ return;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
/* Record SGID/SLID and request ID for lookup. */
- sidr_req_msg = (struct cm_sidr_req_msg *)mad_recv_wc->recv_buf.mad;
- wc = mad_recv_wc->wc;
+ sidr_req_msg = (struct cm_sidr_req_msg *)
+ recv_work->mad_recv_wc->recv_buf.mad;
+ wc = recv_work->mad_recv_wc->wc;
cm_id_priv->remote_port_gid.global.subnet_prefix = wc->slid;
cm_id_priv->remote_port_gid.global.interface_id = 0;
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
@@ -2016,14 +1957,14 @@
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irqrestore(&cm.lock, flags);
- goto destroy_id; /* Duplicate message. */
+ goto out; /* Duplicate message. */
}
cur_cm_id_priv = cm_find_service(sidr_req_msg->service_id);
if (!cur_cm_id_priv || cur_cm_id_priv->id.state != IB_CM_LISTEN) {
rb_erase(&cm_id_priv->remote_id_node, &cm.remote_sidr_table);
spin_unlock_irqrestore(&cm.lock, flags);
/* todo: reject with no match */
- goto destroy_id; /* No match. */
+ goto out; /* No match. */
}
atomic_inc(&cur_cm_id_priv->refcount);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -2033,25 +1974,24 @@
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~0ULL;
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cm_id_priv->port = port;
+ cm_id_priv->port = recv_work->port;
cm_id_priv->pkey_index = wc->pkey_index;
- cm_set_ah_attr(&cm_id_priv->ah_attr, port->port_num, wc->slid,
- wc->sl, wc->dlid_path_bits);
+ cm_set_ah_attr(&cm_id_priv->ah_attr, recv_work->port->port_num,
+ wc->slid, wc->sl, wc->dlid_path_bits);
- cm_event->event = IB_CM_SIDR_REQ_RECEIVED;
- cm_event->param.sidr_req_rcvd.pkey = sidr_req_msg->pkey;
- cm_event->param.sidr_req_rcvd.listen_id = &cur_cm_id_priv->id;
- cm_event->param.sidr_req_rcvd.device = port->mad_agent->device;
- cm_event->param.sidr_req_rcvd.port = port->port_num;
- cm_event->private_data = &sidr_req_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_SIDR_REQ_RECEIVED;
+ param = &recv_work->cm_event.param.sidr_req_rcvd;
+ param->pkey = sidr_req_msg->pkey;
+ param->listen_id = &cur_cm_id_priv->id;
+ param->device = recv_work->port->mad_agent->device;
+ param->port = recv_work->port->port_num;
+ recv_work->cm_event.private_data = &sidr_req_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
cm_deref_id(cur_cm_id_priv);
if (!ret)
- goto out;
-destroy_id:
- ib_destroy_cm_id(&cm_id_priv->id);
+ return;
out:
- kfree(cm_event);
+ ib_destroy_cm_id(&cm_id_priv->id);
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
@@ -2120,24 +2060,20 @@
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
-static void cm_sidr_rep_handler(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static void cm_sidr_rep_handler(struct cm_recv_work *recv_work)
{
struct cm_id_private *cm_id_priv;
struct cm_sidr_rep_msg *sidr_rep_msg;
- struct ib_cm_event *cm_event;
+ struct ib_cm_sidr_rep_event_param *param;
unsigned long flags;
int ret;
- sidr_rep_msg = (struct cm_sidr_rep_msg *)mad_recv_wc->recv_buf.mad;
+ sidr_rep_msg = (struct cm_sidr_rep_msg *)
+ recv_work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id_by_local_id(sidr_rep_msg->request_id);
if (!cm_id_priv)
return; /* Unmatched reply. */
- cm_event = kmalloc(sizeof *cm_event, GFP_KERNEL);
- if (!cm_event)
- goto out;
-
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2146,25 +2082,24 @@
cm_id_priv->id.state = IB_CM_IDLE;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_cancel_mad(port->mad_agent, (unsigned long) cm_id_priv->msg);
+ ib_cancel_mad(recv_work->port->mad_agent,
+ (unsigned long) cm_id_priv->msg);
- cm_event->event = IB_CM_SIDR_REP_RECEIVED;
- cm_event->param.sidr_rep_rcvd.status = sidr_rep_msg->status;
- cm_event->param.sidr_rep_rcvd.qkey = be32_to_cpu(sidr_rep_msg->qkey);
- cm_event->param.sidr_rep_rcvd.qpn = be32_to_cpu(cm_sidr_rep_get_qpn(
- sidr_rep_msg));
- cm_event->param.sidr_rep_rcvd.info = &sidr_rep_msg->info;
- cm_event->param.sidr_rep_rcvd.info_len = sidr_rep_msg->info_length;
- cm_event->private_data = &sidr_rep_msg->private_data;
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, cm_event);
+ recv_work->cm_event.event = IB_CM_SIDR_REP_RECEIVED;
+ param = &recv_work->cm_event.param.sidr_rep_rcvd;
+ param->status = sidr_rep_msg->status;
+ param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
+ param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
+ param->info = &sidr_rep_msg->info;
+ param->info_len = sidr_rep_msg->info_length;
+ recv_work->cm_event.private_data = &sidr_rep_msg->private_data;
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &recv_work->cm_event);
if (ret) {
- kfree(cm_event);
atomic_dec(&cm_id_priv->refcount);
ib_destroy_cm_id(&cm_id_priv->id);
return;
}
out:
- kfree(cm_event);
cm_deref_id(cm_id_priv);
}
@@ -2270,37 +2205,37 @@
switch (recv_work->mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
- cm_req_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_req_handler(recv_work);
break;
case CM_MRA_ATTR_ID:
- cm_mra_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_mra_handler(recv_work);
break;
case CM_REJ_ATTR_ID:
- cm_rej_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_rej_handler(recv_work);
break;
case CM_REP_ATTR_ID:
- cm_rep_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_rep_handler(recv_work);
break;
case CM_RTU_ATTR_ID:
- cm_rtu_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_rtu_handler(recv_work);
break;
case CM_DREQ_ATTR_ID:
- cm_dreq_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_dreq_handler(recv_work);
break;
case CM_DREP_ATTR_ID:
- cm_drep_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_drep_handler(recv_work);
break;
case CM_SIDR_REQ_ATTR_ID:
- cm_sidr_req_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_sidr_req_handler(recv_work);
break;
case CM_SIDR_REP_ATTR_ID:
- cm_sidr_rep_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_sidr_rep_handler(recv_work);
break;
case CM_LAP_ATTR_ID:
- cm_lap_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_lap_handler(recv_work);
break;
case CM_APR_ATTR_ID:
- cm_apr_handler(recv_work->port, recv_work->mad_recv_wc);
+ cm_apr_handler(recv_work);
break;
default:
break;
@@ -2313,8 +2248,23 @@
struct ib_mad_recv_wc *mad_recv_wc)
{
struct cm_recv_work *recv_work;
+ int paths;
+
+ switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
+ case CM_REQ_ATTR_ID:
+ paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
+ alt_local_lid != 0);
+ break;
+ case CM_LAP_ATTR_ID:
+ paths = 1;
+ break;
+ default:
+ paths = 0;
+ break;
+ }
- recv_work = kmalloc(sizeof *recv_work, GFP_KERNEL);
+ recv_work = kmalloc(sizeof *recv_work + sizeof(struct ib_sa_path_rec) *
+ paths, GFP_KERNEL);
if (!recv_work) {
ib_free_recv_mad(mad_recv_wc);
return;
More information about the general mailing list