[ofa-general] [PATCH] mlx4_ib: Optimize hugetlb page support
Eli Cohen
eli at mellanox.co.il
Tue Jan 20 10:04:06 PST 2009
Since Linux does not merge adjacent pages into a single scatter entry through
calls to dma_map_sg(), we do the merging in the driver for huge pages, which
are guaranteed to be composed of physically contiguous natural pages of size
PAGE_SIZE. This results in a significantly lower number of MTT segments used
for registering hugetlb memory regions: with 4 KB natural pages and 2 MB huge
pages, for example, each MTT entry covers 512 natural pages.
Signed-off-by: Eli Cohen <eli at mellanox.co.il>
---
I tried this patch and it improves memory scalability. Without it,
increasing the amount of memory used caused a decrease in
throughput. With this patch there was no drop at all - I used a test
based on MPI with 2 processes.
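
For reference, below is a minimal user-space sketch of the boundary test
used in mlx4_ib_umem_write_huge_mtt(); the shift values and the base
address are assumptions (4 KB / 2 MB, as on x86-64), not taken from the
kernel:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KB natural pages */
#define HPAGE_SHIFT	21			/* assumed: 2 MB huge pages */

int main(void)
{
	/* Same mask the patch computes: the low bits of a natural-page
	 * index that lie within one huge page. */
	unsigned long mask = (1UL << (HPAGE_SHIFT - PAGE_SHIFT)) - 1;
	unsigned long base = 0x40000000UL;	/* made-up hugetlb address */
	int npages = 1024;			/* a 4 MB region */
	int k;

	for (k = 0; k < npages; ++k)
		/* k clears the low bits exactly at huge page starts, so
		 * only one address per huge page would reach the MTT. */
		if (!(k & mask))
			printf("MTT[%d] = 0x%lx\n",
			       k >> (HPAGE_SHIFT - PAGE_SHIFT),
			       base + ((unsigned long)k << PAGE_SHIFT));
	return 0;
}

With those values the mask is 511, so the 1024-page region yields two MTT
entries (for page indices 0 and 512) instead of 1024. The real function
additionally returns -EAGAIN when an sg entry is not exactly PAGE_SIZE,
and the caller then falls back to per-page registration.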
drivers/infiniband/hw/mlx4/mr.c | 67 ++++++++++++++++++++++++++++++++++----
1 files changed, 60 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 8e4d26d..641ea96 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -119,6 +119,38 @@ out:
 	return err;
 }
 
+int mlx4_ib_umem_write_huge_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
+				struct ib_umem *umem, int nhpages)
+{
+	struct ib_umem_chunk *chunk;
+	int j, i, k;
+	dma_addr_t *arr;
+	int err;
+
+	arr = kmalloc(nhpages * sizeof *arr, GFP_KERNEL);
+	if (!arr)
+		return -ENOMEM;
+
+	i = 0;
+	k = 0;
+	list_for_each_entry(chunk, &umem->chunk_list, list)
+		for (j = 0; j < chunk->nmap; ++j, ++k) {
+			if (!(k & ((1 << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))) {
+				if (sg_dma_len(&chunk->page_list[j]) != PAGE_SIZE) {
+					err = -EAGAIN;
+					goto out;
+				}
+				arr[i++] = sg_dma_address(&chunk->page_list[j]);
+			}
+		}
+
+	err = mlx4_write_mtt(dev->dev, mtt, 0, nhpages, arr);
+
+out:
+	kfree(arr);
+	return err;
+}
+
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata)
@@ -128,6 +160,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int shift;
 	int err;
 	int n;
+	int nhuge;
+	int shift_huge;
 
 	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 	if (!mr)
@@ -142,15 +176,34 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	n = ib_umem_page_count(mr->umem);
 	shift = ilog2(mr->umem->page_size);
-
-	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
+	if (mr->umem->hugetlb) {
+		nhuge = ALIGN(n << shift, HPAGE_SIZE) >> HPAGE_SHIFT;
+		shift_huge = HPAGE_SHIFT;
+		err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
+				    convert_access(access_flags), nhuge, shift_huge, &mr->mmr);
+		if (err)
+			goto err_umem;
+
+		err = mlx4_ib_umem_write_huge_mtt(dev, &mr->mmr.mtt, mr->umem, nhuge);
+		if (err) {
+			if (err != -EAGAIN)
+				goto err_mr;
+			else {
+				mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+				goto regular_pages;
+			}
+		}
+	} else {
+regular_pages:
+		err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
 			    convert_access(access_flags), n, shift, &mr->mmr);
-	if (err)
-		goto err_umem;
+		if (err)
+			goto err_umem;
 
-	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
-	if (err)
-		goto err_mr;
+		err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
+		if (err)
+			goto err_mr;
+	}
 
 	err = mlx4_mr_enable(dev->dev, &mr->mmr);
 	if (err)
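
As a usage note, not part of the patch: the hugetlb path is taken when
ib_umem_get() marks the umem as hugetlb-backed, which happens for buffers
mmap()ed from hugetlbfs. A hedged sketch of how a test could allocate such
a buffer and register it (the mount point /mnt/huge and the helper name
are assumptions; error paths trimmed):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <infiniband/verbs.h>

/* Sketch only: register a hugetlbfs-backed buffer; len should be a
 * multiple of the huge page size. */
struct ibv_mr *reg_huge_buf(struct ibv_pd *pd, size_t len)
{
	int fd = open("/mnt/huge/mrbuf", O_CREAT | O_RDWR, 0600);
	void *buf;

	if (fd < 0)
		return NULL;
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (buf == MAP_FAILED)
		return NULL;
	/* Ends up in mlx4_ib_reg_user_mr(); with this patch the MTT is
	 * written with one entry per huge page when possible. */
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}

If the region is not entirely huge-page backed, umem->hugetlb stays clear
and the existing per-page path runs unchanged.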
--
1.6.0.5