[openib-general] [PATCH] [MAD RMPP] add RMPP send support to MAD layer
Sean Hefty
mshefty at ichips.intel.com
Thu Apr 21 18:34:39 PDT 2005
The following patch adds RMPP send support to the kernel MAD layer.
Known limitations:
- NACKs are not implemented.
- Spec-compliant double-sided transfers are not implemented.
  Request/reply matching works, but the ACK of the ACK that occurs
  during the RMPP direction switch is still missing.
- Clients are limited to a single sge.
- Timeout values are hard-coded until packet lifetimes magically
  appear.
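For reference, the per-segment data size the code below works with
depends on the management class.  A minimal sketch of the arithmetic
(mirroring data_offset() and the total_seg computation in
ib_send_rmpp_mad(); the helper name is illustrative, not part of the
patch, and it assumes the payload is padded to a whole number of
segments):

/*
 * For a 256-byte MAD, each segment carries what remains after the
 * class header:
 *   generic RMPP class: 256 - 36 (MAD + RMPP hdrs)      = 220 bytes
 *   SA class:           256 - 56 (MAD + RMPP + SA hdrs) = 200 bytes
 *   vendor range 2:     256 - 40 (MAD + RMPP + OUI)     = 216 bytes
 */
static int rmpp_total_segs(int total_len, int hdr_size)
{
	int data_size = sizeof(struct ib_rmpp_mad) - hdr_size;

	return (total_len - hdr_size) / data_size;
}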
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
Index: include/ib_verbs.h
===================================================================
--- include/ib_verbs.h (revision 2207)
+++ include/ib_verbs.h (working copy)
@@ -573,6 +573,7 @@
u32 remote_qpn;
u32 remote_qkey;
int timeout_ms; /* valid for MADs only */
+ int retries; /* valid for MADs only */
u16 pkey_index; /* valid for GSI only */
u8 port_num; /* valid for DR SMPs on switch only */
} ud;
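From a client's point of view the new field sits beside timeout_ms in
the UD portion of the work request.  A hypothetical setup (values are
placeholders only):

	struct ib_send_wr wr;

	memset(&wr, 0, sizeof wr);
	/* ... address handle, sg_list, and MAD setup elided ... */
	wr.wr.ud.timeout_ms = 2000;	/* wait up to 2s per attempt */
	wr.wr.ud.retries = 3;		/* resend up to 3 times on timeout */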
Index: core/mad_rmpp.c
===================================================================
--- core/mad_rmpp.c (revision 2207)
+++ core/mad_rmpp.c (working copy)
@@ -76,20 +76,6 @@
struct ib_sge sge;
};
-static struct ib_ah * create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
- u8 port_num)
-{
- struct ib_ah_attr ah_attr;
-
- memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.dlid = wc->slid;
- ah_attr.sl = wc->sl;
- ah_attr.src_path_bits = wc->dlid_path_bits;
- ah_attr.port_num = port_num;
-
- return ib_create_ah(pd, &ah_attr);
-}
-
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
atomic_dec(&rmpp_recv->refcount);
@@ -164,9 +150,10 @@
if (!rmpp_recv)
return NULL;
- rmpp_recv->ah = create_ah_from_wc(agent->agent.qp->pd,
- mad_recv_wc->wc,
- agent->agent.port_num);
+ rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ agent->agent.port_num);
if (IS_ERR(rmpp_recv->ah))
goto error;
@@ -291,18 +278,28 @@
kfree(msg);
}
+static int data_offset(u8 mgmt_class)
+{
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+ return offsetof(struct ib_sa_mad, data);
+ else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+ (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+ return offsetof(struct ib_vendor_mad, data);
+ else
+ return offsetof(struct ib_rmpp_mad, data);
+}
+
static void format_ack(struct ib_rmpp_mad *ack,
struct ib_rmpp_mad *data,
struct mad_rmpp_recv *rmpp_recv)
{
unsigned long flags;
- ack->mad_hdr = data->mad_hdr;
+ memcpy(&ack->mad_hdr, &data->mad_hdr,
+ data_offset(data->mad_hdr.mgmt_class));
+
ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
- ack->rmpp_hdr.rmpp_version = data->rmpp_hdr.rmpp_version;
ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
- ib_set_rmpp_resptime(&ack->rmpp_hdr,
- ib_get_rmpp_resptime(&data->rmpp_hdr));
ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
spin_lock_irqsave(&rmpp_recv->lock, flags);
@@ -392,12 +389,18 @@
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
- int hdr_size;
+ struct ib_rmpp_mad *rmpp_mad;
+ int hdr_size, data_size, pad;
- /* TODO: need to check for SA MADs - requires access to SA header */
- hdr_size = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
- return rmpp_recv->seg_num * (sizeof(struct ib_mad) - hdr_size) +
- hdr_size;
+ rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
+
+ hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+ data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
+ pad = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (pad > data_size)
+ pad = 0;
+
+ return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
@@ -513,6 +516,121 @@
return mad_recv_wc;
}
+static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
+ (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
+ (mad_send_wr->seg_num - 1);
+}
+
+static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+ int timeout;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+ rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
+
+ if (mad_send_wr->seg_num == 1) {
+ rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
+ rmpp_mad->rmpp_hdr.paylen_newwin =
+ cpu_to_be32(mad_send_wr->total_seg *
+ (sizeof(struct ib_rmpp_mad) -
+ offsetof(struct ib_rmpp_mad, data)));
+ mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
+ } else {
+ mad_send_wr->send_wr.num_sge = 2;
+ mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
+ mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
+ mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
+ mad_send_wr->data_offset;
+ mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
+ }
+
+ if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
+ rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
+ rmpp_mad->rmpp_hdr.paylen_newwin =
+ cpu_to_be32(sizeof(struct ib_rmpp_mad) -
+ offsetof(struct ib_rmpp_mad, data) -
+ mad_send_wr->pad);
+ }
+
+ /* 5 seconds until we can find the packet lifetime */
+ timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+ if (timeout && timeout < 5000)
+ mad_send_wr->timeout = msecs_to_jiffies(timeout);
+ else
+ mad_send_wr->timeout = msecs_to_jiffies(5000);
+ mad_send_wr->seg_num++;
+
+ return ib_send_mad(mad_send_wr);
+}
+
+static void process_rmpp_ack(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_rmpp_mad *rmpp_mad;
+ unsigned long flags;
+ int seg_num, newwin, ret;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+ if (rmpp_mad->rmpp_hdr.rmpp_status)
+ return;
+
+ seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
+ newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+
+ spin_lock_irqsave(&agent->lock, flags);
+ mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
+ if (!mad_send_wr)
+ goto out; /* Unmatched ACK */
+
+ if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+ (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ goto out; /* Send is already done */
+
+ if (seg_num > mad_send_wr->total_seg)
+ goto out; /* Bad ACK */
+
+ if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
+ goto out; /* Old ACK */
+
+ if (seg_num > mad_send_wr->last_ack) {
+ mad_send_wr->last_ack = seg_num;
+ mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+ }
+ mad_send_wr->newwin = newwin;
+ if (mad_send_wr->refcount > 1)
+ goto out; /* Send is active */
+
+ if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+ /* If no response is expected, the ACK completes the send */
+ if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+ struct ib_mad_send_wc wc;
+
+ ib_mark_mad_done(mad_send_wr);
+ spin_unlock_irqrestore(&agent->lock, flags);
+
+ wc.status = IB_WC_SUCCESS;
+ wc.vendor_err = 0;
+ wc.wr_id = mad_send_wr->wr_id;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+ }
+ ib_reset_mad_timeout(mad_send_wr,
+ mad_send_wr->send_wr.wr.ud.timeout_ms);
+ } else if (mad_send_wr->seg_num < mad_send_wr->newwin) {
+ /* Send failure will just result in a timeout/retry */
+ ret = send_next_seg(mad_send_wr);
+ if (!ret)
+ mad_send_wr->refcount++;
+ }
+out:
+ spin_unlock_irqrestore(&agent->lock, flags);
+}
+
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
@@ -523,6 +641,9 @@
if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
return mad_recv_wc;
+ if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+ goto out;
+
switch (rmpp_mad->rmpp_hdr.rmpp_type) {
case IB_MGMT_RMPP_TYPE_DATA:
if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
@@ -530,38 +651,121 @@
else
return continue_rmpp(agent, mad_recv_wc);
case IB_MGMT_RMPP_TYPE_ACK:
- /* process_rmpp_ack(agent, mad_recv_wc); */
+ process_rmpp_ack(agent, mad_recv_wc);
break;
case IB_MGMT_RMPP_TYPE_STOP:
case IB_MGMT_RMPP_TYPE_ABORT:
- /* process_rmpp_nack(agent, mad_recv_wc); */
+ /* TODO: process_rmpp_nack(agent, mad_recv_wc); */
break;
default:
break;
}
+out:
ib_free_recv_mad(mad_recv_wc);
return NULL;
}
+int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+ int i, total_len, ret;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+ IB_MGMT_RMPP_FLAG_ACTIVE))
+ return IB_RMPP_RESULT_UNHANDLED;
+
+ if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
+ return IB_RMPP_RESULT_INTERNAL;
+
+ if (mad_send_wr->send_wr.num_sge > 1)
+ return -EINVAL; /* TODO: support num_sge > 1 */
+
+ mad_send_wr->seg_num = 1;
+ mad_send_wr->newwin = 1;
+ mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+
+ total_len = 0;
+ for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
+ total_len += mad_send_wr->send_wr.sg_list[i].length;
-enum ib_mad_result
-ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
- struct ib_mad_send_wc *mad_send_wc)
+ mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
+ (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
+ mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+ be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+
+ /* We need to wait for the final ACK even if there isn't a response */
+ mad_send_wr->refcount += (mad_send_wr->timeout == 0);
+
+ ret = send_next_seg(mad_send_wr);
+ if (!ret)
+ return IB_RMPP_RESULT_CONSUMED;
+ return ret;
+}
+
+int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_send_wc *mad_send_wc)
{
struct ib_rmpp_mad *rmpp_mad;
struct rmpp_msg *msg;
+ int ret;
rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE))
- return IB_MAD_RESULT_SUCCESS;
+ return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
msg = (struct rmpp_msg *) (unsigned long) mad_send_wc->wr_id;
free_rmpp_msg(msg);
- return IB_MAD_RESULT_CONSUMED;
+ return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
}
- /* TODO: continue send until done - ACKed or we have a response */
- return IB_MAD_RESULT_SUCCESS;
+ if (mad_send_wc->status != IB_WC_SUCCESS ||
+ mad_send_wr->status != IB_WC_SUCCESS)
+ return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */
+
+ if (!mad_send_wr->timeout)
+ return IB_RMPP_RESULT_PROCESSED; /* Response received */
+
+ if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+ mad_send_wr->timeout =
+ msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+ return IB_RMPP_RESULT_PROCESSED; /* Send done */
+ }
+
+ if (mad_send_wr->seg_num > mad_send_wr->newwin ||
+ mad_send_wr->seg_num > mad_send_wr->total_seg)
+ return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */
+
+ ret = send_next_seg(mad_send_wr);
+ if (ret) {
+ mad_send_wc->status = IB_WC_GENERAL_ERR;
+ return IB_RMPP_RESULT_PROCESSED;
+ }
+ return IB_RMPP_RESULT_CONSUMED;
+}
+
+int ib_timeout_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+ int ret;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+ if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+ IB_MGMT_RMPP_FLAG_ACTIVE))
+ return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
+
+ if (mad_send_wr->last_ack == mad_send_wr->total_seg ||
+ !mad_send_wr->retries--)
+ return IB_RMPP_RESULT_PROCESSED;
+
+ mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
+ ret = send_next_seg(mad_send_wr);
+ if (ret)
+ return IB_RMPP_RESULT_PROCESSED;
+
+ mad_send_wr->refcount++;
+ return IB_RMPP_RESULT_CONSUMED;
}
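To summarize the sender-side state machine added above (comment-only
sketch; the field meanings are inferred from how the patch uses them):

/*
 * seg_num   - next segment to post (1-based)
 * last_ack  - highest segment number the receiver has ACKed
 * newwin    - right edge of the transmit window granted by the peer
 * total_seg - number of segments in the whole transfer
 *
 * send_next_seg() is called while seg_num <= newwin and
 * seg_num <= total_seg; process_rmpp_ack() advances last_ack/newwin
 * and may restart transmission; ib_timeout_rmpp() rewinds seg_num to
 * last_ack + 1 and consumes one of the retries.
 */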
Index: core/mad.c
===================================================================
--- core/mad.c (revision 2207)
+++ core/mad.c (working copy)
@@ -63,8 +63,6 @@
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
- struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void cancel_sends(void *data);
static void local_completions(void *data);
@@ -851,7 +849,7 @@
}
EXPORT_SYMBOL(ib_free_send_mad);
-static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_qp_info *qp_info;
struct ib_send_wr *bad_send_wr;
@@ -953,19 +951,18 @@
ret = -ENOMEM;
goto error2;
}
+ memset(mad_send_wr, 0, sizeof *mad_send_wr);
mad_send_wr->send_wr = *send_wr;
mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
memcpy(mad_send_wr->sg_list, send_wr->sg_list,
sizeof *send_wr->sg_list * send_wr->num_sge);
- mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
- mad_send_wr->send_wr.next = NULL;
+ mad_send_wr->wr_id = send_wr->wr_id;
mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
mad_send_wr->mad_agent_priv = mad_agent_priv;
/* Timeout will be updated after send completes */
mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
ud.timeout_ms);
- mad_send_wr->retry = 0;
/* One reference for each work request to QP + response */
mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
mad_send_wr->status = IB_WC_SUCCESS;
@@ -977,8 +974,13 @@
&mad_agent_priv->send_list);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- ret = ib_send_mad(mad_send_wr);
- if (ret) {
+ if (mad_agent_priv->agent.rmpp_version) {
+ ret = ib_send_rmpp_mad(mad_send_wr);
+ if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
+ ret = ib_send_mad(mad_send_wr);
+ } else
+ ret = ib_send_mad(mad_send_wr);
+ if (ret < 0) {
/* Fail send request */
spin_lock_irqsave(&mad_agent_priv->lock, flags);
list_del(&mad_send_wr->agent_list);
@@ -1538,19 +1540,6 @@
return valid;
}
-static struct ib_mad_recv_wc *
-process_recv(struct ib_mad_agent_private *mad_agent_priv,
- struct ib_mad_recv_wc *mad_recv_wc)
-{
- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
- list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-
- if (mad_agent_priv->agent.rmpp_version)
- return ib_process_rmpp_recv_wc(mad_agent_priv, mad_recv_wc);
- else
- return mad_recv_wc;
-}
-
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_hdr *mad_hdr)
{
@@ -1563,9 +1552,8 @@
(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
-static struct ib_mad_send_wr_private*
-find_send_req(struct ib_mad_agent_private *mad_agent_priv,
- u64 tid)
+struct ib_mad_send_wr_private*
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
{
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1592,7 +1580,7 @@
return NULL;
}
-static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
mad_send_wr->timeout = 0;
if (mad_send_wr->refcount == 1) {
@@ -1610,19 +1598,24 @@
unsigned long flags;
u64 tid;
- /* Process the receive before giving it to the user. */
- mad_recv_wc = process_recv(mad_agent_priv, mad_recv_wc);
- if (!mad_recv_wc) {
- if (atomic_dec_and_test(&mad_agent_priv->refcount))
- wake_up(&mad_agent_priv->wait);
- return;
+ INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
+ list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
+
+ if (mad_agent_priv->agent.rmpp_version) {
+ mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
+ mad_recv_wc);
+ if (!mad_recv_wc) {
+ if (atomic_dec_and_test(&mad_agent_priv->refcount))
+ wake_up(&mad_agent_priv->wait);
+ return;
+ }
}
/* Complete corresponding request */
if (response_mad(mad_recv_wc->recv_buf.mad)) {
tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- mad_send_wr = find_send_req(mad_agent_priv, tid);
+ mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
if (!mad_send_wr) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
ib_free_recv_mad(mad_recv_wc);
@@ -1630,7 +1623,7 @@
wake_up(&mad_agent_priv->wait);
return;
}
- ib_mark_req_done(mad_send_wr);
+ ib_mark_mad_done(mad_send_wr);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Defined behavior is to complete response before request */
@@ -1821,23 +1814,33 @@
}
}
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+ int timeout_ms)
+{
+ mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
+ wait_for_response(mad_send_wr);
+ adjust_timeout(mad_send_wr->mad_agent_priv);
+}
+
/*
* Process a send work completion
*/
-static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
- struct ib_mad_send_wc *mad_send_wc)
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_agent_private *mad_agent_priv;
unsigned long flags;
- enum ib_mad_result ret;
+ int ret;
mad_agent_priv = mad_send_wr->mad_agent_priv;
- if (mad_agent_priv->agent.rmpp_version)
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ if (mad_agent_priv->agent.rmpp_version) {
ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
- else
- ret = IB_MAD_RESULT_SUCCESS;
+ if (ret == IB_RMPP_RESULT_CONSUMED)
+ goto done;
+ } else
+ ret = IB_RMPP_RESULT_UNHANDLED;
- spin_lock_irqsave(&mad_agent_priv->lock, flags);
if (mad_send_wc->status != IB_WC_SUCCESS &&
mad_send_wr->status == IB_WC_SUCCESS) {
mad_send_wr->status = mad_send_wc->status;
@@ -1849,8 +1852,7 @@
mad_send_wr->status == IB_WC_SUCCESS) {
wait_for_response(mad_send_wr);
}
- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
- return;
+ goto done;
}
/* Remove send from MAD agent and notify client of completion */
@@ -1860,7 +1862,7 @@
if (mad_send_wr->status != IB_WC_SUCCESS )
mad_send_wc->status = mad_send_wr->status;
- if (ret == IB_MAD_RESULT_SUCCESS)
+ if (ret != IB_RMPP_RESULT_INTERNAL)
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
mad_send_wc);
@@ -1869,6 +1871,9 @@
wake_up(&mad_agent_priv->wait);
kfree(mad_send_wr);
+ return;
+done:
+ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
@@ -2066,8 +2071,7 @@
}
static struct ib_mad_send_wr_private*
-find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
- u64 wr_id)
+find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
{
struct ib_mad_send_wr_private *mad_send_wr;
@@ -2234,6 +2238,7 @@
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
+ int ret;
mad_agent_priv = (struct ib_mad_agent_private *)data;
@@ -2257,6 +2262,14 @@
}
list_del(&mad_send_wr->agent_list);
+ if (mad_agent_priv->agent.rmpp_version) {
+ ret = ib_timeout_rmpp(mad_send_wr);
+ if (ret == IB_RMPP_RESULT_CONSUMED) {
+ list_add_tail(&mad_send_wr->agent_list,
+ &mad_agent_priv->send_list);
+ continue;
+ }
+ }
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
mad_send_wc.wr_id = mad_send_wr->wr_id;
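One subtlety in the timeout path above: when ib_timeout_rmpp() manages
to resend and returns IB_RMPP_RESULT_CONSUMED, the work request goes
back on the agent's send list rather than completing.  A comment-only
sketch of the flow (illustrative, not code from the patch):

/*
 * timeout_sends():
 *   for each expired send on the wait list:
 *     remove it from the list
 *     if the agent speaks RMPP and ib_timeout_rmpp() returns
 *       CONSUMED (segments resent, retries remaining):
 *         requeue the send and keep waiting
 *     otherwise:
 *       complete the send to the client as a timeout
 */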
Index: core/mad_rmpp.h
===================================================================
--- core/mad_rmpp.h (revision 2207)
+++ core/mad_rmpp.h (working copy)
@@ -37,14 +37,24 @@
#include "mad_priv.h"
+enum {
+ IB_RMPP_RESULT_PROCESSED,
+ IB_RMPP_RESULT_CONSUMED,
+ IB_RMPP_RESULT_INTERNAL,
+ IB_RMPP_RESULT_UNHANDLED
+};
+
+int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr);
+
struct ib_mad_recv_wc *
-ib_process_rmpp_recv_wc(struct ib_mad_agent_private *mad_agent_priv,
+ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc);
-enum ib_mad_result
-ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
- struct ib_mad_send_wc *mad_send_wc);
+int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_send_wc *mad_send_wc);
+
+void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
-void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *mad_agent_priv);
+int ib_timeout_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
#endif /* __MAD_RMPP_H__ */
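How the callers in mad.c interpret these results, summarized from the
call sites in this patch:

/*
 * IB_RMPP_RESULT_PROCESSED - RMPP updated its state; the caller
 *                            finishes completion processing as usual.
 * IB_RMPP_RESULT_CONSUMED  - RMPP has taken ownership of the send;
 *                            the caller stops processing it.
 * IB_RMPP_RESULT_INTERNAL  - internally generated MAD (ACK, STOP, or
 *                            ABORT); do not call the client's handler.
 * IB_RMPP_RESULT_UNHANDLED - not an active RMPP MAD; take the normal
 *                            non-RMPP path.
 */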
Index: core/mad_priv.h
===================================================================
--- core/mad_priv.h (revision 2207)
+++ core/mad_priv.h (working copy)
@@ -126,6 +126,15 @@
int retry;
int refcount;
enum ib_wc_status status;
+
+ /* RMPP control */
+ int last_ack;
+ int seg_num;
+ int newwin;
+ int total_seg;
+ int data_offset;
+ int pad;
+ int retries;
};
struct ib_mad_local_private {
@@ -198,4 +207,17 @@
extern kmem_cache_t *ib_mad_cache;
+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
+
+struct ib_mad_send_wr_private *
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_send_wc *mad_send_wc);
+
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
+
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+ int timeout_ms);
+
#endif /* __IB_MAD_PRIV_H__ */
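As an example of how the new fields drive the gather list: with a
generic RMPP class, data_offset is 36 and the per-segment data size is
sizeof(struct ib_rmpp_mad) - 36 = 220 bytes, so send_next_seg() posts
segment n (for n > 1) as two sges: the 36 header bytes, then the
payload slice starting at data_offset + 220 * (n - 1).  A sketch of
that address computation (illustrative; it mirrors get_seg_addr() in
mad_rmpp.c above):

static u64 seg_addr(u64 buf_addr, int data_offset, int seg_num)
{
	int data_size = sizeof(struct ib_rmpp_mad) - data_offset;

	return buf_addr + data_offset + (u64) data_size * (seg_num - 1);
}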