[openib-general] Re: [PATCH 1/2] (fixed) mthca: max_inline_data support
Roland Dreier
roland at topspin.com
Fri May 13 13:36:26 PDT 2005
Thanks, I ended up committing this slightly modified patch:
--- infiniband/hw/mthca/mthca_dev.h	(revision 2287)
+++ infiniband/hw/mthca/mthca_dev.h	(working copy)
@@ -439,12 +439,14 @@ int mthca_alloc_qp(struct mthca_dev *dev
 		   struct mthca_cq *recv_cq,
 		   enum ib_qp_type type,
 		   enum ib_sig_type send_policy,
+		   struct ib_qp_cap *cap,
 		   struct mthca_qp *qp);
 int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct mthca_cq *send_cq,
 		    struct mthca_cq *recv_cq,
 		    enum ib_sig_type send_policy,
+		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
 		    struct mthca_sqp *sqp);
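The header changes above just thread the caller's requested capabilities down
into the low-level QP allocators.  For context (quoted from ib_verbs.h, not
part of this patch), this is the capability structure being passed through;
max_inline_data is the field this series starts honoring:

	struct ib_qp_cap {
		u32	max_send_wr;
		u32	max_recv_wr;
		u32	max_send_sge;
		u32	max_recv_sge;
		u32	max_inline_data;
	};
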
--- infiniband/hw/mthca/mthca_provider.c	(revision 2287)
+++ infiniband/hw/mthca/mthca_provider.c	(working copy)
@@ -486,16 +486,11 @@ static struct ib_qp *mthca_create_qp(str
 			qp->rq.db_index  = ucmd.rq_db_index;
 		}
 
-		qp->sq.max    = init_attr->cap.max_send_wr;
-		qp->rq.max    = init_attr->cap.max_recv_wr;
-		qp->sq.max_gs = init_attr->cap.max_send_sge;
-		qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
 		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
 				     to_mcq(init_attr->send_cq),
 				     to_mcq(init_attr->recv_cq),
 				     init_attr->qp_type, init_attr->sq_sig_type,
-				     qp);
+				     &init_attr->cap, qp);
 
 		if (err && pd->uobject) {
 			context = to_mucontext(pd->uobject->context);
@@ -524,17 +519,12 @@ static struct ib_qp *mthca_create_qp(str
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
-		qp->sq.max    = init_attr->cap.max_send_wr;
-		qp->rq.max    = init_attr->cap.max_recv_wr;
-		qp->sq.max_gs = init_attr->cap.max_send_sge;
-		qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
 		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
 				      to_mcq(init_attr->send_cq),
 				      to_mcq(init_attr->recv_cq),
-				      init_attr->sq_sig_type,
+				      init_attr->sq_sig_type, &init_attr->cap,
 				      qp->ibqp.qp_num, init_attr->port_num,
 				      to_msqp(qp));
 		break;
@@ -552,6 +542,8 @@ static struct ib_qp *mthca_create_qp(str
 	init_attr->cap.max_inline_data = 0;
 	init_attr->cap.max_send_wr     = qp->sq.max;
 	init_attr->cap.max_recv_wr     = qp->rq.max;
+	init_attr->cap.max_send_sge    = qp->sq.max_gs;
+	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
 	return &qp->ibqp;
 }
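
With the provider changes above, mthca_create_qp() no longer copies the
requested sizes into the QP by hand; the whole ib_qp_cap is handed to the
allocator, which validates it and may round values up, and the adjusted
capabilities are written back into init_attr->cap on return (note the
unchanged context still reports max_inline_data = 0).  A hypothetical
consumer sketch, assuming the usual pd/cq setup and the ib_create_qp()
verb of this era, would read back the granted values like this:

	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap         = {
			.max_send_wr     = 64,
			.max_recv_wr     = 64,
			.max_send_sge    = 1,
			.max_recv_sge    = 1,
			.max_inline_data = 64,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_RC,
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);

	if (!IS_ERR(qp))
		/* mthca has written the actual limits back: max_send_sge
		 * covers the WQE space sized for the inline request, and
		 * on a mem-free HCA the WR counts are rounded up to
		 * powers of two. */
		printk(KERN_INFO "granted %u send SGEs\n",
		       init_attr.cap.max_send_sge);
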
--- infiniband/hw/mthca/mthca_qp.c	(revision 2287)
+++ infiniband/hw/mthca/mthca_qp.c	(working copy)
@@ -46,7 +46,9 @@ enum {
 	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
 	MTHCA_ACK_REQ_FREQ       = 10,
 	MTHCA_FLIGHT_LIMIT       = 9,
-	MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
+	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
+	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
+	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
 };
 
 enum {
@@ -1205,22 +1207,31 @@ static int mthca_alloc_qp_common(struct
 	return 0;
 }
 
-static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+			     struct mthca_qp *qp)
 {
-	int i;
-
-	if (!mthca_is_memfree(dev))
-		return;
-
-	for (i = 0; 1 << i < qp->rq.max; ++i)
-		; /* nothing */
+	if (mthca_is_memfree(dev)) {
+		qp->rq.max = roundup_pow_of_two(cap->max_recv_wr);
+		qp->sq.max = roundup_pow_of_two(cap->max_send_wr);
+	} else {
+		qp->rq.max = cap->max_recv_wr;
+		qp->sq.max = cap->max_send_wr;
+	}
 
-	qp->rq.max = 1 << i;
+	qp->rq.max_gs = cap->max_recv_sge;
+	qp->sq.max_gs = max_t(int, cap->max_send_sge,
+			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+				    MTHCA_INLINE_CHUNK_SIZE) / sizeof (struct mthca_data_seg));
 
-	for (i = 0; 1 << i < qp->sq.max; ++i)
-		; /* nothing */
+	/*
+	 * For MLX transport we need 2 extra S/G entries:
+	 * one for the header and one for the checksum at the end
+	 */
+	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
+	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
+		return -EINVAL;
 
-	qp->sq.max = 1 << i;
+	return 0;
 }
 
 int mthca_alloc_qp(struct mthca_dev *dev,
@@ -1229,11 +1240,14 @@ int mthca_alloc_qp(struct mthca_dev *dev
 		   struct mthca_cq *recv_cq,
 		   enum ib_qp_type type,
 		   enum ib_sig_type send_policy,
+		   struct ib_qp_cap *cap,
 		   struct mthca_qp *qp)
 {
 	int err;
 
-	mthca_align_qp_size(dev, qp);
+	err = mthca_set_qp_size(dev, cap, qp);
+	if (err)
+		return err;
 
 	switch (type) {
 	case IB_QPT_RC: qp->transport = RC; break;
@@ -1266,14 +1280,17 @@ int mthca_alloc_sqp(struct mthca_dev *de
 		    struct mthca_cq *send_cq,
 		    struct mthca_cq *recv_cq,
 		    enum ib_sig_type send_policy,
+		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
 		    struct mthca_sqp *sqp)
 {
-	int err = 0;
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+	int err;
 
-	mthca_align_qp_size(dev, &sqp->qp);
+	err = mthca_set_qp_size(dev, cap, &sqp->qp);
+	if (err)
+		return err;
 
 	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
 	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
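
The interesting part is the sq.max_gs computation in mthca_set_qp_size():
inline data is written into the send WQE itself, behind a 4-byte header
(MTHCA_INLINE_HEADER_SIZE) and padded out to 16-byte chunks
(MTHCA_INLINE_CHUNK_SIZE), and each scatter/gather descriptor
(struct mthca_data_seg) is 16 bytes as well.  So the WQE is sized for
whichever is bigger: the requested SGE count or the number of 16-byte
slots the inline payload needs.  A minimal standalone sketch of that
arithmetic, with ALIGN() re-derived here since the kernel macro isn't
usable outside the tree:

	#include <stdio.h>

	#define MTHCA_INLINE_HEADER_SIZE 4   /* data segment overhead for inline */
	#define MTHCA_INLINE_CHUNK_SIZE  16  /* inline data segment chunk */
	#define MTHCA_DATA_SEG_SIZE      16  /* sizeof (struct mthca_data_seg) */

	/* round x up to the next multiple of a (a must be a power of two) */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	static int inline_segs(int max_inline_data)
	{
		return ALIGN(max_inline_data + MTHCA_INLINE_HEADER_SIZE,
			     MTHCA_INLINE_CHUNK_SIZE) / MTHCA_DATA_SEG_SIZE;
	}

	int main(void)
	{
		/* 60 bytes inline + 4-byte header = 64 bytes = 4 S/G slots */
		printf("60 bytes inline -> %d slots\n", inline_segs(60));
		/* one byte more spills into a fifth 16-byte chunk */
		printf("61 bytes inline -> %d slots\n", inline_segs(61));
		return 0;
	}

So asking for 60 bytes of inline data forces sq.max_gs up to at least 4,
and 61 bytes to 5.  On top of that, MLX (special) QPs are checked against
dev->limits.max_sg with two extra S/G entries reserved, since the hardware
needs one for the UD header and one for the trailing checksum.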