[ofa-general] [PATCH v5] IB/mlx4: shrinking WQE
Michael S. Tsirkin
mst at dev.mellanox.co.il
Mon Sep 10 23:01:34 PDT 2007
IB/mlx4: shrinking WQE
ConnectX supports shrinking WQEs, such that a single WR can include
multiple units of wqe_shift. This way, WRs can differ in size and do
not have to be a power of 2 in size, saving memory and speeding up
send WR posting. Unfortunately, if we do this, the wqe_index field in
the CQE can't be used to look up the WR ID anymore, so we only do this
when selective signalling is off.
Further, on 32-bit platforms, we can't use vmap to make
the QP buffer virtually contiguous. Thus we have to use
constant-sized WRs to make sure a WR is always fully within
a single page-sized chunk.
Finally, we use the NOP opcode to avoid wrapping around in the middle
of a WR. Since MLX QPs only support SEND (so NOP is not available), we
use constant-sized WRs in this case. We look for the smallest value of
wqe_shift such that the resulting number of WQEs does not exceed
device capabilities.
Signed-off-by: Michael S. Tsirkin <mst at dev.mellanox.co.il>
---
Changes since v3: make shrinking WQEs also work with the
latest firmware (newer than 2.2.0).
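To make the size bookkeeping easier to follow, here is a small
stand-alone sketch (illustration only, not part of the patch; the
sq_sketch structure and helper names are invented) of the arithmetic
used in mlx4_ib_post_send() and pad_wraparound() below: each WR is
rounded up to a whole number of wqe_shift-sized units, and when fewer
units than the largest possible WR remain before the end of the queue,
the tail is padded with a NOP WQE so that no WR wraps around.

/*
 * Illustration only -- not part of the patch.  The structure and
 * helper below are invented for the example; only DIV_ROUND_UP and
 * the index masking mirror the driver code.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct sq_sketch {
	unsigned wqe_cnt;		/* number of basic units, power of 2 */
	unsigned wqe_shift;		/* log2 of the basic WQE unit size */
	unsigned max_wqes_per_wr;	/* units needed by the largest WR */
};

/* Basic units left before the queue wraps, given the current index. */
static unsigned units_to_wrap(const struct sq_sketch *sq, unsigned ind)
{
	return sq->wqe_cnt - (ind & (sq->wqe_cnt - 1));
}

int main(void)
{
	struct sq_sketch sq = { .wqe_cnt = 64, .wqe_shift = 6, .max_wqes_per_wr = 4 };
	unsigned ind = 61;	/* pretend we are near the end of the SQ */
	unsigned wr_size = 200;	/* bytes of descriptor built for this WR */
	unsigned units = DIV_ROUND_UP(wr_size, 1u << sq.wqe_shift);

	if (units_to_wrap(&sq, ind) < sq.max_wqes_per_wr) {
		/* The driver would post_nop_wqe() over these tail units. */
		printf("pad %u unit(s) with a NOP WQE\n", units_to_wrap(&sq, ind));
		ind += units_to_wrap(&sq, ind);
	}
	printf("WR of %u bytes occupies %u unit(s), next index %u\n",
	       wr_size, units, ind + units);
	return 0;
}

With wqe_cnt = 64, a 200-byte WR arriving at index 61 needs 4 units
but only 3 remain before the wrap, so those 3 are consumed by a NOP
and the WR itself starts at index 64 (slot 0 after masking).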
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 8bf44da..0981f3c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -331,6 +331,11 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR;
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP)) {
+ printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+ return -EINVAL;
+ }
+
if (!*cur_qp ||
(be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
/*
@@ -353,8 +358,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
if (is_send) {
wq = &(*cur_qp)->sq;
- wqe_ctr = be16_to_cpu(cqe->wqe_index);
- wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+ if (!(*cur_qp)->sq_signal_bits) {
+ wqe_ctr = be16_to_cpu(cqe->wqe_index);
+ wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+ }
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
} else if ((*cur_qp)->ibqp.srq) {
@@ -403,6 +410,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
+ default:
+ printk("Unrecognized send opcode 0x%x!\n",
+ cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK);
+ return -EINVAL;
}
} else {
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -422,6 +433,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
wc->wc_flags = IB_WC_WITH_IMM;
wc->imm_data = cqe->immed_rss_invalid;
break;
+ default:
+ printk("Unrecognized recv opcode 0x%x!\n",
+ cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK);
+ return -EINVAL;
}
wc->slid = be16_to_cpu(cqe->rlid);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 705ff2f..a72ecb9 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -115,6 +115,8 @@ struct mlx4_ib_qp {
u32 doorbell_qpn;
__be32 sq_signal_bits;
+ unsigned sq_next_wqe;
+ int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct mlx4_ib_wq sq;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ba0428d..2afd48d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/log2.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
@@ -92,7 +93,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
- if (qp->buf.nbufs == 1)
+ if (BITS_PER_LONG == 64 || qp->buf.nbufs == 1)
return qp->buf.u.direct.buf + offset;
else
return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
@@ -111,16 +112,71 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
/*
* Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with 0xffffffff, except for
- * the very first chunk of the WQE.
+ * first four bytes of every 64 byte chunk with
+ * 0x7FFFFFFF | (invalid_ownership_value << 31).
+ *
+ * When the max WR size is less than or equal to the WQE size,
+ * as an optimization, we can stamp the WQE with 0xffffffff,
+ * and skip the very first chunk of the WQE.
*/
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
u32 *wqe = get_send_wqe(qp, n);
int i;
+ int s;
+ __be32 stamp;
+
+ s = roundup(size, 1 << qp->sq.wqe_shift) / sizeof *wqe;
+ if (qp->sq_max_wqes_per_wr > 1) {
+ stamp = cpu_to_be32(0x7fffffff | (n & qp->sq.wqe_cnt ? 0 : 1 << 31));
+ for (i = 0; i < s; i += 16)
+ wqe[i] = stamp;
+ } else {
+ for (i = 16; i < s; i += 16)
+ wqe[i] = 0xffffffff;
+ }
+}
+
+static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
+{
+ struct mlx4_wqe_ctrl_seg *ctrl;
+ struct mlx4_wqe_inline_seg *inl;
+ void *wqe;
+ int s;
+
+ stamp_send_wqe(qp, (n + qp->sq_spare_wqes) & (qp->sq.wqe_cnt - 1), size);
+
+ ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+ s = sizeof(struct mlx4_wqe_ctrl_seg) + (qp->ibqp.qp_type == IB_QPT_UD ?
+ sizeof(struct mlx4_wqe_datagram_seg) : 0);
+
+ /* Pad the remainder of the WQE with an inline data segment. */
+ if (size > s) {
+ inl = wqe + s;
+ inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
+ }
+ ctrl->srcrb_flags = 0;
+ ctrl->fence_size = size / 16;
+ /*
+ * Make sure descriptor is fully written before
+ * setting ownership bit (because HW can start
+ * executing as soon as we do).
+ */
+ wmb();
+
+ ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
+ (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+}
- for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
- wqe[i] = 0xffffffff;
+/* Post NOP WQE to prevent wrap-around in the middle of WR */
+static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
+{
+ unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
+ if (unlikely(s < qp->sq_max_wqes_per_wr)) {
+ post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
+ ind += s;
+ }
+ return ind;
}
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -234,9 +290,35 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
return 0;
}
+static int nop_wqe_shift(enum ib_qp_type type)
+{
+ /*
+ * WQE size is at least 0x20.
+ * UD WQEs must have a datagram segment.
+ * RC and UC WQEs must have a control segment.
+ * MLX WQEs do not support NOP.
+ */
+ switch (type) {
+ case IB_QPT_UD:
+ return ilog2(roundup_pow_of_two(max(sizeof (struct mlx4_wqe_ctrl_seg) +
+ sizeof (struct mlx4_wqe_datagram_seg),
+ (size_t)0x20)));
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ return -EINVAL;
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ default:
+ return ilog2(roundup_pow_of_two(max(sizeof (struct mlx4_wqe_ctrl_seg),
+ (size_t)0x20)));
+ }
+}
+
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
enum ib_qp_type type, struct mlx4_ib_qp *qp)
{
+ int s;
+
/* Sanity check SQ size before proceeding */
if (cap->max_send_wr > dev->dev->caps.max_wqes ||
cap->max_send_sge > dev->dev->caps.max_sq_sg ||
@@ -252,20 +334,60 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
return -EINVAL;
- qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
- sizeof (struct mlx4_wqe_data_seg),
- cap->max_inline_data +
- sizeof (struct mlx4_wqe_inline_seg)) +
- send_wqe_overhead(type)));
- qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
- sizeof (struct mlx4_wqe_data_seg);
+ s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
+ cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
+ send_wqe_overhead(type);
/*
- * We need to leave 2 KB + 1 WQE of headroom in the SQ to
- * allow HW to prefetch.
+ * Hermon supports shrinking WQEs, such that a single WR can include
+ * multiple units of wqe_shift. This way, WRs can differ in size and do
+ * not have to be a power of 2 in size, saving memory and speeding up
+ * send WR posting. Unfortunately, if we do this, the wqe_index field
+ * in the CQE can't be used to look up the WR ID anymore, so we do this
+ * only if selective signalling is off.
+ *
+ * Further, on 32-bit platforms, we can't use vmap to make
+ * the QP buffer virtually contiguous. Thus we have to use
+ * constant-sized WRs to make sure a WR is always fully within
+ * a single page-sized chunk.
+ *
+ * Finally, we use the NOP opcode to avoid wrapping around in the
+ * middle of a WR. Since MLX QPs only support SEND, we use
+ * constant-sized WRs in this case.
+ *
+ * We look for the smallest value of wqe_shift such that the resulting
+ * number of WQEs does not exceed device capabilities.
*/
- qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
- qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);
+ qp->sq.wqe_shift = nop_wqe_shift(type);
+ if (!qp->sq_signal_bits || BITS_PER_LONG != 64 || qp->sq.wqe_shift < 0)
+ qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
+ for (;;) {
+ if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
+ return -EINVAL;
+
+ qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1 << qp->sq.wqe_shift);
+
+ /*
+ * We need to leave 2 KB + 1 WR of headroom in the SQ to
+ * allow HW to prefetch.
+ */
+ qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
+ qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
+ qp->sq_max_wqes_per_wr +
+ qp->sq_spare_wqes);
+
+ if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
+ break;
+
+ if (qp->sq_max_wqes_per_wr <= 1)
+ return -EINVAL;
+
+ ++qp->sq.wqe_shift;
+ }
+
+ qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+ send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -277,7 +399,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
qp->sq.offset = 0;
}
- cap->max_send_wr = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
+ cap->max_send_wr = qp->sq.max_post =
+ (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
cap->max_send_sge = qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
cap->max_inline_data = 0;
@@ -315,6 +438,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->rq.tail = 0;
qp->sq.head = 0;
qp->sq.tail = 0;
+ qp->sq_next_wqe = 0;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+ else
+ qp->sq_signal_bits = 0;
err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
if (err)
@@ -405,11 +534,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*/
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- else
- qp->sq_signal_bits = 0;
-
qp->mqp.event = mlx4_ib_qp_event;
return 0;
@@ -904,7 +1028,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
ctrl = get_send_wqe(qp, i);
ctrl->owner_opcode = cpu_to_be32(1 << 31);
- stamp_send_wqe(qp, i);
+ stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
}
}
@@ -1228,14 +1352,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
int nreq;
int err = 0;
- int ind;
- int size;
+ unsigned ind;
+ int uninitialized_var(stamp);
+ int uninitialized_var(size);
int i;
spin_lock_irqsave(&qp->rq.lock, flags);
- ind = qp->sq.head;
-
+ ind = qp->sq_next_wqe;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
err = -ENOMEM;
@@ -1250,7 +1374,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
- qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
+ qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
ctrl->srcrb_flags =
(wr->send_flags & IB_SEND_SIGNALED ?
@@ -1266,7 +1390,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->imm = 0;
wqe += sizeof *ctrl;
- size = sizeof *ctrl / 16;
+ size = sizeof *ctrl;
switch (ibqp->qp_type) {
case IB_QPT_RC:
@@ -1281,8 +1405,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
set_atomic_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_atomic_seg);
- size += (sizeof (struct mlx4_wqe_raddr_seg) +
- sizeof (struct mlx4_wqe_atomic_seg)) / 16;
+ size += sizeof (struct mlx4_wqe_raddr_seg) +
+ sizeof (struct mlx4_wqe_atomic_seg);
break;
@@ -1292,7 +1416,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
- size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
+ size += sizeof (struct mlx4_wqe_raddr_seg);
break;
default:
@@ -1304,7 +1428,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_QPT_UD:
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
- size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+ size += sizeof (struct mlx4_wqe_datagram_seg);
break;
case IB_QPT_SMI:
@@ -1315,7 +1439,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}
wqe += err;
- size += err / 16;
+ size += err;
err = 0;
break;
@@ -1328,7 +1452,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mlx4_wqe_data_seg);
- size += sizeof (struct mlx4_wqe_data_seg) / 16;
+ size += sizeof (struct mlx4_wqe_data_seg);
}
/* Add one more inline data segment for ICRC for MLX sends */
@@ -1337,11 +1461,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
cpu_to_be32((1 << 31) | 4);
((u32 *) wqe)[1] = 0;
wqe += sizeof (struct mlx4_wqe_data_seg);
- size += sizeof (struct mlx4_wqe_data_seg) / 16;
+ size += sizeof (struct mlx4_wqe_data_seg);
}
ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
- MLX4_WQE_CTRL_FENCE : 0) | size;
+ MLX4_WQE_CTRL_FENCE : 0) | (size / 16);
/*
* Make sure descriptor is fully written before
@@ -1358,16 +1482,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+ stamp = (ind + qp->sq_spare_wqes) & (qp->sq.wqe_cnt - 1);
+ ind += DIV_ROUND_UP(size, 1 << qp->sq.wqe_shift);
+
/*
* We can improve latency by not stamping the last
* send queue WQE until after ringing the doorbell, so
* only stamp here if there are still more WQEs to post.
+ *
+ * Same optimization applies to padding with NOP wqe
+ * in case of WQE shrinking (used to prevent wrap-around
+ * in the middle of WR).
*/
- if (wr->next)
- stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
- (qp->sq.wqe_cnt - 1));
+ if (wr->next) {
+ stamp_send_wqe(qp, stamp, size);
+ ind = pad_wraparound(qp, ind);
+ }
- ++ind;
}
out:
@@ -1389,8 +1520,10 @@ out:
*/
mmiowb();
- stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
- (qp->sq.wqe_cnt - 1));
+ stamp_send_wqe(qp, stamp, size);
+
+ ind = pad_wraparound(qp, ind);
+ qp->sq_next_wqe = ind;
}
spin_unlock_irqrestore(&qp->rq.lock, flags);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f8d63d3..0fce74d 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -151,6 +151,19 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
}
+
+ if (BITS_PER_LONG == 64) {
+ struct page **pages;
+ pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->u.page_list[i].buf);
+ buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->u.direct.buf)
+ goto err_free;
+ }
}
return 0;
@@ -170,6 +183,9 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
buf->u.direct.map);
else {
+ if (BITS_PER_LONG == 64)
+ vunmap(buf->u.direct.buf);
+
for (i = 0; i < buf->nbufs; ++i)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->u.page_list[i].buf,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index cfb78fb..bd3ed64 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -185,7 +185,7 @@ struct mlx4_buf_list {
};
struct mlx4_buf {
- union {
+ struct {
struct mlx4_buf_list direct;
struct mlx4_buf_list *page_list;
} u;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 3968b94..bf37369 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -158,6 +158,7 @@ enum {
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX4_WQE_CTRL_NEC = 1 << 29,
};
struct mlx4_wqe_ctrl_seg {
--
MST