[ofa-general] [PATCH - 10] mlx4 - add LSO support
Eli Cohen
eli at mellanox.co.il
Wed Aug 15 11:21:49 PDT 2007
Add LSO (large send offload) support to mlx4: query the maximum GSO size via the firmware QUERY_DEV_CAP command, advertise IB_DEVICE_TCP_GSO when the device supports it, build an LSO segment for the new IB_WR_LSO opcode in the UD send path, and report IB_WC_LSO send completions.
Signed-off-by: Eli Cohen <eli at mellanox.co.il>
---
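For reviewers: a minimal sketch of how a consumer (e.g. IPoIB) might post an LSO send on a UD QP once this support is in place. The IB_WR_LSO opcode and the wr.ud.header/hlen/mss fields are assumed to come from the core verbs patches elsewhere in this series; the helper below and its parameters are hypothetical and not part of this patch.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper, for illustration only: post one LSO work
 * request on a UD QP.  IB_WR_LSO and wr.ud.header/hlen/mss are
 * assumed to be provided by the core verbs changes in this series.
 */
static int example_post_lso(struct ib_qp *qp, struct ib_ah *ah,
			    u32 remote_qpn, u32 remote_qkey,
			    void *header, int hlen, int mss,
			    struct ib_sge *sg_list, int num_sge)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof wr);
	wr.opcode	     = IB_WR_LSO;	/* mapped to MLX4_OPCODE_LSO by this patch */
	wr.sg_list	     = sg_list;		/* payload the HCA segments into mss-sized chunks */
	wr.num_sge	     = num_sge;
	wr.send_flags	     = IB_SEND_SIGNALED;
	wr.wr.ud.ah	     = ah;
	wr.wr.ud.remote_qpn  = remote_qpn;
	wr.wr.ud.remote_qkey = remote_qkey;
	wr.wr.ud.header	     = header;		/* copied into mlx4_lso_seg by build_lso_seg() */
	wr.wr.ud.hlen	     = hlen;
	wr.wr.ud.mss	     = mss;

	/*
	 * build_lso_seg() encodes these as
	 *   mss_hdr_size = ((mss - hlen) << 16) | hlen
	 * and rounds the inlined header up to a multiple of 16 bytes.
	 */
	return ib_post_send(qp, &wr, &bad_wr);
}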
Index: linux-2.6.23-rc1/drivers/net/mlx4/fw.c
===================================================================
--- linux-2.6.23-rc1.orig/drivers/net/mlx4/fw.c 2007-08-15 20:50:30.000000000 +0300
+++ linux-2.6.23-rc1/drivers/net/mlx4/fw.c 2007-08-15 20:50:36.000000000 +0300
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+ field &= 0x1f;
+ if (!field)
+ dev_cap->max_gso_sz = 0;
+ else
+ dev_cap->max_gso_sz = 1 << field;
+
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
dev_cap->max_rdma_global = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+ mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
dump_dev_cap_flags(dev, dev_cap->flags);
Index: linux-2.6.23-rc1/drivers/net/mlx4/fw.h
===================================================================
--- linux-2.6.23-rc1.orig/drivers/net/mlx4/fw.h 2007-08-15 20:50:12.000000000 +0300
+++ linux-2.6.23-rc1/drivers/net/mlx4/fw.h 2007-08-15 20:50:36.000000000 +0300
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
u8 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
+ int max_gso_sz;
};
struct mlx4_adapter {
Index: linux-2.6.23-rc1/drivers/net/mlx4/main.c
===================================================================
--- linux-2.6.23-rc1.orig/drivers/net/mlx4/main.c 2007-08-15 20:50:12.000000000 +0300
+++ linux-2.6.23-rc1/drivers/net/mlx4/main.c 2007-08-15 20:50:36.000000000 +0300
@@ -158,6 +158,7 @@ static int __devinit mlx4_dev_cap(struct
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.max_gso_sz = dev_cap->max_gso_sz;
return 0;
}
Index: linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/main.c
===================================================================
--- linux-2.6.23-rc1.orig/drivers/infiniband/hw/mlx4/main.c 2007-08-15 20:50:30.000000000 +0300
+++ linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/main.c 2007-08-15 20:50:36.000000000 +0300
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct i
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_IP_CSUM;
+ if (dev->dev->caps.max_gso_sz)
+ props->device_cap_flags |= IB_DEVICE_TCP_GSO;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
@@ -572,6 +574,8 @@ static void *mlx4_ib_add(struct mlx4_dev
if (ibdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
ibdev->ib_dev.flags |= IB_DEVICE_IP_CSUM;
+ if (ibdev->dev->caps.max_gso_sz)
+ ibdev->ib_dev.flags |= IB_DEVICE_TCP_GSO;
if (init_node_data(ibdev))
goto err_map;
Index: linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/qp.c
===================================================================
--- linux-2.6.23-rc1.orig/drivers/infiniband/hw/mlx4/qp.c 2007-08-15 20:50:34.000000000 +0300
+++ linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/qp.c 2007-08-15 20:50:36.000000000 +0300
@@ -65,6 +65,7 @@ struct mlx4_ib_sqp {
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
[IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
[IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
[IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -737,9 +738,11 @@ static int __mlx4_ib_modify_qp(struct ib
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
- ibqp->qp_type == IB_QPT_UD)
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+ else if (ibqp->qp_type == IB_QPT_UD)
+ context->mtu_msgmax = (IB_MTU_4096 << 5) |
+ ilog2(dev->dev->caps.max_gso_sz);
else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
printk(KERN_ERR "path MTU (%u) is invalid\n",
@@ -1221,6 +1224,29 @@ static void set_data_seg(struct mlx4_wqe
dseg->addr = cpu_to_be64(sg->addr);
}
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+ struct mlx4_ib_qp *qp, int *lso_seg_len)
+{
+ int halign;
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+ /* make sure the LSO header is written to the WQE before
+ mss_hdr_size overwrites the stamping value */
+ wmb();
+
+ wqe->mss_hdr_size = cpu_to_be32(((wr->wr.ud.mss - wr->wr.ud.hlen)
+ << 16) | wr->wr.ud.hlen);
+
+ halign = ALIGN(wr->wr.ud.hlen, 16);
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+ return -EINVAL;
+
+ *lso_seg_len = halign;
+ return 0;
+}
+
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -1233,6 +1259,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp
int ind;
int size;
int i;
+ int hlen;
spin_lock_irqsave(&qp->rq.lock, flags);
@@ -1311,6 +1338,17 @@ int mlx4_ib_post_send(struct ib_qp *ibqp
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+ if (wr->opcode == IB_WR_LSO) {
+ err = build_lso_seg(wqe, wr, qp, &hlen);
+ if (err) {
+ *bad_wr = wr;
+ goto out;
+ }
+ wqe += hlen;
+ size += hlen >> 4;
+ }
+
break;
case IB_QPT_SMI:
@@ -1364,6 +1402,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+
/*
* We can improve latency by not stamping the last
* send queue WQE until after ringing the doorbell, so
Index: linux-2.6.23-rc1/include/linux/mlx4/device.h
===================================================================
--- linux-2.6.23-rc1.orig/include/linux/mlx4/device.h 2007-08-15 20:50:12.000000000 +0300
+++ linux-2.6.23-rc1/include/linux/mlx4/device.h 2007-08-15 20:50:36.000000000 +0300
@@ -177,6 +177,7 @@ struct mlx4_caps {
u32 flags;
u16 stat_rate_support;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
+ int max_gso_sz;
};
struct mlx4_buf_list {
Index: linux-2.6.23-rc1/include/linux/mlx4/qp.h
===================================================================
--- linux-2.6.23-rc1.orig/include/linux/mlx4/qp.h 2007-08-15 20:50:30.000000000 +0300
+++ linux-2.6.23-rc1/include/linux/mlx4/qp.h 2007-08-15 20:50:36.000000000 +0300
@@ -215,6 +215,11 @@ struct mlx4_wqe_datagram_seg {
__be32 reservd[2];
};
+struct mlx4_lso_seg {
+ __be32 mss_hdr_size;
+ __be32 header[0];
+};
+
struct mlx4_wqe_bind_seg {
__be32 flags1;
__be32 flags2;
Index: linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/cq.c
===================================================================
--- linux-2.6.23-rc1.orig/drivers/infiniband/hw/mlx4/cq.c 2007-08-15 20:50:30.000000000 +0300
+++ linux-2.6.23-rc1/drivers/infiniband/hw/mlx4/cq.c 2007-08-15 20:50:36.000000000 +0300
@@ -403,6 +403,9 @@ static int mlx4_ib_poll_one(struct mlx4_
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
+ case MLX4_OPCODE_LSO:
+ wc->opcode = IB_WC_LSO;
+ break;
}
} else {
wc->byte_len = be32_to_cpu(cqe->byte_cnt);