[ofa-general] [PATCH 11/16 v4] IB/mlx4: Add LSO support to mlx4
Eli Cohen
eli at mellanox.co.il
Wed Jan 30 08:31:35 PST 2008
IB/mlx4: Add LSO support to mlx4
Signed-off-by: Eli Cohen <eli at mellanox.co.il>
---
drivers/infiniband/hw/mlx4/cq.c | 3 ++
drivers/infiniband/hw/mlx4/main.c | 4 +++
drivers/infiniband/hw/mlx4/qp.c | 52 +++++++++++++++++++++++++++++++++---
drivers/net/mlx4/fw.c | 9 ++++++
drivers/net/mlx4/fw.h | 1 +
drivers/net/mlx4/main.c | 1 +
include/linux/mlx4/device.h | 1 +
include/linux/mlx4/qp.h | 5 +++
8 files changed, 71 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 539c69c..75fc2b3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -409,6 +409,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
+ case MLX4_OPCODE_LSO:
+ wc->opcode = IB_WC_LSO;
+ break;
}
} else {
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8ce94a1..2dd0de3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_IP_CSUM;
+ if (dev->dev->caps.max_gso_sz)
+ props->device_cap_flags |= IB_DEVICE_TCP_TSO;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -616,6 +618,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (ibdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
ibdev->ib_dev.flags |= IB_DEVICE_IP_CSUM;
+ if (ibdev->dev->caps.max_gso_sz)
+ ibdev->ib_dev.flags |= IB_DEVICE_TCP_TSO;
if (init_node_data(ibdev))
goto err_map;
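
The two hunks above expose the LSO capability to consumers whenever the firmware reports a non-zero max_gso_sz. A minimal user-space sketch of the resulting check on the consumer side; IB_DEVICE_TCP_TSO is stubbed to a placeholder bit here, the real flag comes from the core patches earlier in this series:

/* Hypothetical check a ULP could do before enabling its LSO path.
 * IB_DEVICE_TCP_TSO is a stand-in value for this sketch, not the real one. */
#include <stdio.h>

#define IB_DEVICE_TCP_TSO       (1 << 17)       /* placeholder bit */

int main(void)
{
        unsigned int device_cap_flags = IB_DEVICE_TCP_TSO; /* as if from ib_query_device() */

        if (device_cap_flags & IB_DEVICE_TCP_TSO)
                printf("device advertises UD LSO\n");
        else
                printf("no LSO support, fall back to software GSO\n");
        return 0;
}
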
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a04e931..fc4811c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -69,6 +69,7 @@ enum {
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
[IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
[IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
[IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -243,6 +244,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *i
{
struct ib_qp_cap *cap = &init_attr->cap;
enum ib_qp_type type = init_attr->qp_type;
+ int reserve = 0;
/* Sanity check SQ size before proceeding */
if (cap->max_send_wr > dev->dev->caps.max_wqes ||
@@ -259,15 +261,18 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *i
cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
return -EINVAL;
- if (init_attr->create_flags & QP_CREATE_LSO)
+ if (init_attr->create_flags & QP_CREATE_LSO) {
qp->flags |= MLX4_QP_LSO;
+ reserve = 64;
+ }
qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
- sizeof (struct mlx4_wqe_data_seg),
+ sizeof (struct mlx4_wqe_data_seg) +
+ reserve,
cap->max_inline_data +
sizeof (struct mlx4_wqe_inline_seg)) +
send_wqe_overhead(type)));
- qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
+ qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - reserve - send_wqe_overhead(type)) /
sizeof (struct mlx4_wqe_data_seg);
/*
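
The sizing change above folds a fixed 64-byte reservation for the inline LSO header into the send WQE size and subtracts it again when computing max_gs. A self-contained sketch of that arithmetic; the inline-data branch of the max() is dropped for brevity, and the 16-byte data segment, 64-byte UD WQE overhead and 4 SGEs are assumed example values:

/* Toy model of the set_kernel_sq_size() arithmetic with the LSO reserve. */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

static unsigned int ilog2(unsigned int x)
{
        unsigned int l = 0;

        while (x >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int data_seg = 16;     /* assumed sizeof(struct mlx4_wqe_data_seg) */
        unsigned int overhead = 64;     /* assumed send_wqe_overhead() for a UD QP */
        unsigned int max_sge  = 4;      /* assumed cap->max_send_sge */
        unsigned int reserve  = 64;     /* LSO header room, as in the patch */
        unsigned int wqe_shift, max_gs;

        wqe_shift = ilog2(roundup_pow_of_two(max_sge * data_seg + reserve + overhead));
        max_gs = ((1u << wqe_shift) - reserve - overhead) / data_seg;

        printf("wqe_shift=%u (%u-byte WQEs), max_gs=%u\n",
               wqe_shift, 1u << wqe_shift, max_gs);
        return 0;
}

With these inputs the reserve bumps the WQE from 128 to 256 bytes and leaves max_gs at 8; without it the same inputs give 128-byte WQEs and max_gs of 4.
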
@@ -755,9 +760,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
- ibqp->qp_type == IB_QPT_UD)
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+ else if (ibqp->qp_type == IB_QPT_UD)
+ context->mtu_msgmax = (IB_MTU_4096 << 5) |
+ ilog2(dev->dev->caps.max_gso_sz);
else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
printk(KERN_ERR "path MTU (%u) is invalid\n",
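
The context byte touched above packs the path MTU enum into bits 7:5 and log2 of the maximum message size into bits 4:0, so a UD QP now accepts messages up to the device's GSO limit. A small worked example, assuming IB_MTU_4096 == 5 and a 64 KB max_gso_sz:

/* Sketch of the mtu_msgmax encoding used for UD QPs in the hunk above. */
#include <stdio.h>

int main(void)
{
        unsigned int ib_mtu_4096 = 5;   /* value of the IB_MTU_4096 enum */
        unsigned int log_max_gso = 16;  /* ilog2(max_gso_sz) for an assumed 64 KB limit */
        unsigned int mtu_msgmax = (ib_mtu_4096 << 5) | log_max_gso;

        printf("mtu_msgmax = 0x%02x\n", mtu_msgmax);    /* prints 0xb0 */
        return 0;
}
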
@@ -1274,6 +1281,28 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+ struct mlx4_ib_qp *qp, int *lso_seg_len)
+{
+ int halign;
+
+ halign = ALIGN(wr->wr.ud.hlen, 16);
+ if (unlikely(!(qp->flags & MLX4_QP_LSO) && wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+ return -EINVAL;
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+ /* make sure LSO header is written before
+ overwriting stamping */
+ wmb();
+
+ wqe->mss_hdr_size = cpu_to_be32(((wr->wr.ud.mss - wr->wr.ud.hlen)
+ << 16) | wr->wr.ud.hlen);
+
+ *lso_seg_len = halign;
+ return 0;
+}
+
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
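
build_lso_seg() above pads the inline header to a 16-byte WQE slot boundary and packs (mss - hlen) into the high 16 bits of mss_hdr_size, with the header length in the low 16 bits. A toy illustration of that packing with made-up hlen/mss values; the wr.ud.header/hlen/mss fields themselves are added by the core LSO patch in this series:

/* Stand-alone model of the header alignment and mss_hdr_size packing. */
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int hlen = 44;         /* assumed IPoIB + IP + TCP header length */
        unsigned int mss  = 1460;       /* assumed MSS */
        unsigned int halign = ALIGN(hlen, 16);
        unsigned int mss_hdr_size = ((mss - hlen) << 16) | hlen;

        printf("halign=%u bytes (%u WQE slots), mss_hdr_size=0x%08x\n",
               halign, halign >> 4, mss_hdr_size);
        return 0;
}

The aligned length divided by 16 is exactly what the IB_WR_LSO branch in mlx4_ib_post_send() below adds to the WQE size (size += hlen >> 4).
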
@@ -1364,6 +1393,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+ if (wr->opcode == IB_WR_LSO) {
+ int hlen;
+
+ err = build_lso_seg(wqe, wr, qp, &hlen);
+ if (err) {
+ *bad_wr = wr;
+ goto out;
+ }
+ wqe += hlen;
+ size += hlen >> 4;
+ }
+
break;
case IB_QPT_SMI:
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 736942f..7b426ff 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+ field &= 0x1f;
+ if (!field)
+ dev_cap->max_gso_sz = 0;
+ else
+ dev_cap->max_gso_sz = 1 << field;
+
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
dev_cap->max_rdma_global = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+ mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
dump_dev_cap_flags(dev, dev_cap->flags);
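
The new QUERY_DEV_CAP parsing reads one byte at offset 0x2d and treats its low 5 bits as log2 of the maximum GSO size, with 0 meaning LSO is not supported. A trivial sketch of the decode, using an assumed field value of 16:

/* Decode of the max-GSO field as done in mlx4_QUERY_DEV_CAP() above. */
#include <stdio.h>

int main(void)
{
        unsigned char field = 16;       /* assumed byte read from the firmware mailbox */
        int max_gso_sz;

        field &= 0x1f;
        max_gso_sz = field ? 1 << field : 0;

        printf("max_gso_sz = %d\n", max_gso_sz);        /* prints 65536 */
        return 0;
}
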
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 7e1dd9e..ad5abf3 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
u8 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
+ int max_gso_sz;
};
struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 89b3f0b..ed2c648 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.max_gso_sz = dev_cap->max_gso_sz;
return 0;
}
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 222815d..856570f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -181,6 +181,7 @@ struct mlx4_caps {
u32 flags;
u16 stat_rate_support;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
+ int max_gso_sz;
};
struct mlx4_buf_list {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4eb921..0bac8e8 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -215,6 +215,11 @@ struct mlx4_wqe_datagram_seg {
__be32 reservd[2];
};
+struct mlx4_lso_seg {
+ __be32 mss_hdr_size;
+ __be32 header[0];
+};
+
struct mlx4_wqe_bind_seg {
__be32 flags1;
__be32 flags2;
--
1.5.3.8