[openib-general] [PATCH 1/2] (fixed) mthca: max_inline_data support
Michael S. Tsirkin
mst at mellanox.co.il
Tue May 10 08:35:43 PDT 2005
Here's an updated patch - I'll be offline till Sunday, and I hope
it's good to go. Please note that there's a companion patch for libmthca
([PATCH 2/2]), which is also needed to actually use this stuff.
Quoting Roland Dreier <roland at topspin.com>:
> Subject: Re: [openib-general] [PATCH 1/2] mthca: max_inline_data support
>
> Thanks... does it make sense to set the max_inline_data value for
> kernel QPs but then not support posting inline data?
You are right: max_inline_data in init_attr should be left at 0
until posting inline data in the kernel is supported. Fixed.
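To illustrate how this is meant to be used once the libmthca patch
from 2/2 is applied: a userspace consumer asks for inline support
through the cap field at QP creation time, along these lines (a sketch
against the libibverbs interface with made-up sizes, not part of the
patch):

        struct ibv_qp_init_attr attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .qp_type = IBV_QPT_RC,
                .cap     = {
                        .max_send_wr     = 16,
                        .max_recv_wr     = 16,
                        .max_send_sge    = 1,
                        .max_recv_sge    = 1,
                        /* bytes of inline data per send WR */
                        .max_inline_data = 64,
                },
        };
        /* fails if the requested capabilities exceed the HCA limits */
        struct ibv_qp *qp = ibv_create_qp(pd, &attr);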
> Also should we add some error checking against the HCA's maximum WQE
> size?
I added such a check.
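To spell the check out with a worked example (numbers mine): each
gather entry (struct mthca_data_seg) is 16 bytes and inline data
carries a 4 byte segment header, so a request for max_inline_data
bytes of inline data needs

        d = ALIGN(max_inline_data + 4, 16) / 16

gather-sized slots in the send WQE. E.g. max_inline_data = 60 gives
ALIGN(64, 16) / 16 = 4 slots, so the send queue is sized for
max(max_send_sge, 4) entries per WQE (plus 2 for MLX QPs), and QP
creation now fails with -EINVAL if that exceeds dev->limits.max_sg.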
Support the max_inline_data parameter for userspace QPs.
Check max_send_sge/max_recv_sge/max_inline_data against the HCA's
limits.
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Index: hw/mthca/mthca_dev.h
===================================================================
--- hw/mthca/mthca_dev.h (revision 2292)
+++ hw/mthca/mthca_dev.h (working copy)
@@ -439,12 +439,14 @@ int mthca_alloc_qp(struct mthca_dev *dev
                    struct mthca_cq *recv_cq,
                    enum ib_qp_type type,
                    enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
                    struct mthca_qp *qp);
 int mthca_alloc_sqp(struct mthca_dev *dev,
                     struct mthca_pd *pd,
                     struct mthca_cq *send_cq,
                     struct mthca_cq *recv_cq,
                     enum ib_sig_type send_policy,
+                    struct ib_qp_cap *cap,
                     int qpn,
                     int port,
                     struct mthca_sqp *sqp);
Index: hw/mthca/mthca_provider.c
===================================================================
--- hw/mthca/mthca_provider.c (revision 2292)
+++ hw/mthca/mthca_provider.c (working copy)
@@ -486,16 +486,11 @@ static struct ib_qp *mthca_create_qp(str
                         qp->rq.db_index = ucmd.rq_db_index;
                 }
 
-                qp->sq.max = init_attr->cap.max_send_wr;
-                qp->rq.max = init_attr->cap.max_recv_wr;
-                qp->sq.max_gs = init_attr->cap.max_send_sge;
-                qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
                 err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->qp_type, init_attr->sq_sig_type,
-                                     qp);
+                                     &init_attr->cap, qp);
 
                 if (err && pd->uobject) {
                         context = to_mucontext(pd->uobject->context);
@@ -524,17 +519,12 @@ static struct ib_qp *mthca_create_qp(str
                 if (!qp)
                         return ERR_PTR(-ENOMEM);
 
-                qp->sq.max = init_attr->cap.max_send_wr;
-                qp->rq.max = init_attr->cap.max_recv_wr;
-                qp->sq.max_gs = init_attr->cap.max_send_sge;
-                qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
                 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
                 err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                       to_mcq(init_attr->send_cq),
                                       to_mcq(init_attr->recv_cq),
-                                      init_attr->sq_sig_type,
+                                      init_attr->sq_sig_type, &init_attr->cap,
                                       qp->ibqp.qp_num, init_attr->port_num,
                                       to_msqp(qp));
                 break;
Index: hw/mthca/mthca_qp.c
===================================================================
--- hw/mthca/mthca_qp.c (revision 2292)
+++ hw/mthca/mthca_qp.c (working copy)
@@ -46,7 +46,9 @@ enum {
         MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
         MTHCA_ACK_REQ_FREQ = 10,
         MTHCA_FLIGHT_LIMIT = 9,
-        MTHCA_UD_HEADER_SIZE = 72 /* largest UD header possible */
+        MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */
+        MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */
+        MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */
 };
 
 enum {
@@ -1205,22 +1207,41 @@ static int mthca_alloc_qp_common(struct
         return 0;
 }
 
-static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+                             struct mthca_qp *qp)
 {
         int i;
+        u32 d;
 
-        if (!mthca_is_memfree(dev))
-                return;
+        if (mthca_is_memfree(dev)) {
+                for (i = 0; 1 << i < cap->max_recv_wr; ++i)
+                        ; /* nothing */
 
-        for (i = 0; 1 << i < qp->rq.max; ++i)
-                ; /* nothing */
+                qp->rq.max = 1 << i;
 
-        qp->rq.max = 1 << i;
+                for (i = 0; 1 << i < cap->max_send_wr; ++i)
+                        ; /* nothing */
 
-        for (i = 0; 1 << i < qp->sq.max; ++i)
-                ; /* nothing */
+                qp->sq.max = 1 << i;
+        } else {
+                qp->rq.max = cap->max_recv_wr;
+                qp->sq.max = cap->max_send_wr;
+        }
+
+        qp->rq.max_gs = cap->max_recv_sge;
 
-        qp->sq.max = 1 << i;
+        d = ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+                  MTHCA_INLINE_CHUNK_SIZE) / sizeof (struct mthca_data_seg);
+        qp->sq.max_gs = max(cap->max_send_sge, d);
+
+        if (qp->transport == MLX) {
+                /* For MLX transport we need 2 extra S/G entries:
+                 * for header and checksum */
+                qp->sq.max_gs += 2;
+        }
+
+        return (qp->sq.max_gs > dev->limits.max_sg ||
+                qp->rq.max_gs > dev->limits.max_sg) ? -EINVAL : 0;
 }
 
 int mthca_alloc_qp(struct mthca_dev *dev,
@@ -1229,11 +1250,14 @@ int mthca_alloc_qp(struct mthca_dev *dev
                    struct mthca_cq *recv_cq,
                    enum ib_qp_type type,
                    enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
                    struct mthca_qp *qp)
 {
         int err;
 
-        mthca_align_qp_size(dev, qp);
+        err = mthca_set_qp_size(dev, cap, qp);
+        if (err)
+                return err;
 
         switch (type) {
         case IB_QPT_RC: qp->transport = RC; break;
@@ -1266,14 +1290,17 @@ int mthca_alloc_sqp(struct mthca_dev *de
                     struct mthca_cq *send_cq,
                     struct mthca_cq *recv_cq,
                     enum ib_sig_type send_policy,
+                    struct ib_qp_cap *cap,
                     int qpn,
                     int port,
                     struct mthca_sqp *sqp)
 {
-        int err = 0;
         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+        int err;
 
-        mthca_align_qp_size(dev, &sqp->qp);
+        err = mthca_set_qp_size(dev, cap, &sqp->qp);
+        if (err)
+                return err;
 
         sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
         sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,