[openib-general] [PATCH] [RMPP] ib_coalesce_recv_mad
Sean Hefty
mshefty at ichips.intel.com
Tue May 3 16:35:00 PDT 2005
The following patch implements ib_coalesce_recv_mad and fixes an issue
where the mad_len was set incorrectly on the receive side.
Note, you will need to use the attached file for the patch to apply
correctly, but provided inline for review.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
Index: include/ib_mad.h
===================================================================
--- include/ib_mad.h (revision 2248)
+++ include/ib_mad.h (working copy)
@@ -447,16 +447,20 @@
/**
* ib_coalesce_recv_mad - Coalesces received MAD data into a single
buffer.
+ * @mad_agent: Specifies the MAD agent that received the MAD.
* @mad_recv_wc: Work completion information for a received MAD.
* @buf: User-provided data buffer to receive the coalesced buffers. The
* referenced buffer should be at least the size of the mad_len
specified
* by @mad_recv_wc.
+ * @usermode: Indicates if the specified data buffer is a userspace
allocated
+ * data buffer.
*
* This call copies a chain of received RMPP MADs into a single data
buffer,
* removing duplicated headers.
*/
-void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
- void *buf);
+unsigned long ib_coalesce_recv_mad(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *buf, int usermode);
/**
* ib_free_recv_mad - Returns data buffers used to receive a MAD.
Index: core/mad_rmpp.c
===================================================================
--- core/mad_rmpp.c (revision 2248)
+++ core/mad_rmpp.c (working copy)
@@ -33,6 +33,7 @@
*/
#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
#include "mad_rmpp.h"
#include "mad_priv.h"
@@ -396,8 +397,8 @@
hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
- pad = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
- if (pad > data_size)
+ pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (pad > data_size || pad < 0)
pad = 0;
return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -419,6 +420,56 @@
return rmpp_wc;
}
+unsigned long ib_coalesce_recv_mad(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *buf, int usermode)
+{
+ struct ib_mad_recv_buf *seg_buf;
+ struct ib_rmpp_mad *rmpp_mad;
+ void *data;
+ int size, len, offset, ret = 0;
+ u8 flags;
+
+ len = mad_recv_wc->mad_len;
+ if (len <= sizeof(struct ib_mad)) {
+ if (usermode)
+ ret = copy_to_user(buf, mad_recv_wc->recv_buf.mad, len);
+ else
+ memcpy(buf, mad_recv_wc->recv_buf.mad, len);
+ goto out;
+ }
+
+ offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+
+ list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
+ rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
+ flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);
+
+ if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
+ data = rmpp_mad;
+ size = sizeof(*rmpp_mad);
+ } else {
+ data = rmpp_mad + offset;
+ if (flags & IB_MGMT_RMPP_FLAG_LAST)
+ size = len;
+ else
+ size = sizeof(*rmpp_mad) - offset;
+ }
+
+ if (usermode) {
+ ret = copy_to_user(buf, data, size);
+ if (ret)
+ goto out;
+ } else
+ memcpy(buf, data, size);
+
+ len -= size;
+ buf += size;
+ }
+out: return ret;
+}
+EXPORT_SYMBOL(ib_coalesce_recv_mad);
+
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
Index: core/mad.c
===================================================================
--- core/mad.c (revision 2248)
+++ core/mad.c (working copy)
@@ -1027,13 +1027,6 @@
}
EXPORT_SYMBOL(ib_free_recv_mad);
-void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
- void *buf)
-{
- printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
-}
-EXPORT_SYMBOL(ib_coalesce_recv_mad);
-
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
u8 rmpp_version,
ib_mad_send_handler send_handler,
-------------- next part --------------
An embedded and charset-unspecified text was scrubbed...
Name: diffs
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20050503/0bb1f44e/attachment.ksh>
More information about the general
mailing list