[ofa-general][PATCH] mlx4: Completion EQ per cpu (MP support, Patch 10)

Yevgeny Petrilin yevgenyp at mellanox.co.il
Tue Apr 22 07:13:54 PDT 2008


From 2a2d22208f6fdba4c0c2afdf0ed12ef07b93d661 Mon Sep 17 00:00:00 2001
From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
Date: Tue, 22 Apr 2008 16:39:47 +0300
Subject: [PATCH] mlx4: Completion EQ per cpu

Completion EQs are created per CPU. Newly created CQs are attached to an EQ
using a round-robin algorithm, unless a specific EQ was requested.

Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
---
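Note for reviewers: mlx4_cq_alloc() below treats the new "vector" argument as
a 1-based completion EQ index, with vector == 0 meaning "pick the next EQ
round-robin". A minimal standalone sketch of that selection logic follows;
pick_comp_eq() and main() are hypothetical, for illustration only, and are not
part of the patch:

/* Sketch of the round-robin completion-EQ selection done in
 * mlx4_cq_alloc(); not driver code. */
#include <stdio.h>

struct eq_table_sketch {
	int num_comp_eqs;	/* completion EQs actually created */
	int last_comp_eq;	/* 1-based index of the EQ handed out last */
};

/* Return a 1-based completion EQ index, or -1 (the driver returns
 * -EINVAL) when an explicit vector is out of range.  vector == 0
 * requests round-robin assignment. */
static int pick_comp_eq(struct eq_table_sketch *t, unsigned int vector)
{
	if (vector > (unsigned int)t->num_comp_eqs)
		return -1;
	if (vector == 0) {
		vector = t->last_comp_eq % t->num_comp_eqs + 1;
		t->last_comp_eq = vector;
	}
	return vector;
}

int main(void)
{
	struct eq_table_sketch t = { .num_comp_eqs = 4, .last_comp_eq = 0 };
	int i;

	for (i = 0; i < 6; ++i)		/* prints "1 2 3 4 1 2" */
		printf("%d ", pick_comp_eq(&t, 0));
	printf("\n");
	return 0;
}

With vector == 0 the counter simply walks 1..num_comp_eqs and wraps, so CQs
are spread evenly across the per-CPU completion EQs.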
 drivers/infiniband/hw/mlx4/cq.c |    2 +-
 drivers/net/mlx4/cq.c           |   19 ++++++++++++++++---
 drivers/net/mlx4/eq.c           |   39 ++++++++++++++++++++++++++-------------
 drivers/net/mlx4/main.c         |   14 ++++++++------
 drivers/net/mlx4/mlx4.h         |    6 ++++--
 include/linux/mlx4/device.h     |    3 ++-
 6 files changed, 57 insertions(+), 26 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 63daf52..732f812 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -221,7 +221,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	}

 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-			    cq->db.dma, &cq->mcq, 0);
+			    cq->db.dma, &cq->mcq, vector, 0);
 	if (err)
 		goto err_dbmap;

diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index d893cc1..bbb4c7b 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mlx4_cq_resize);

 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-		  int collapsed)
+		  unsigned vector, int collapsed)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -227,7 +227,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,

 	cq_context->flags = cpu_to_be32(!!collapsed << 18);
 	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-	cq_context->comp_eqn        = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
+
+	if (vector > priv->eq_table.num_comp_eqs) {
+		err = -EINVAL;
+		goto err_radix;
+	}
+
+	if (vector == 0) {
+		vector = priv->eq_table.last_comp_eq %
+			priv->eq_table.num_comp_eqs + 1;
+		priv->eq_table.last_comp_eq = vector;
+	}
+	cq->comp_eq_idx		    = MLX4_EQ_COMP_CPU0 + vector - 1;
+	cq_context->comp_eqn	    = priv->eq_table.eq[MLX4_EQ_COMP_CPU0 +
+							vector - 1].eqn;
 	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

 	mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -276,7 +289,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 	if (err)
 		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

-	synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
+	synchronize_irq(priv->eq_table.eq[cq->comp_eq_idx].irq);

 	spin_lock_irq(&cq_table->lock);
 	radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a15..b4676db 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -265,7 +265,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)

 	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < MLX4_EQ_COMP_CPU0 + priv->eq_table.num_comp_eqs; ++i)
 		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

 	return IRQ_RETVAL(work);
@@ -482,7 +482,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)

 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < MLX4_EQ_COMP_CPU0 + eq_table->num_comp_eqs; ++i)
 		if (eq_table->eq[i].have_irq)
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 }
@@ -553,6 +553,7 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
 int mlx4_init_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	int req_eqs;
 	int err;
 	int i;

@@ -573,11 +574,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	priv->eq_table.clr_int  = priv->clr_base +
 		(priv->eq_table.inta_pin < 32 ? 4 : 0);

-	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
-			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
-			     &priv->eq_table.eq[MLX4_EQ_COMP]);
-	if (err)
-		goto err_out_unmap;
+	priv->eq_table.num_comp_eqs = 0;
+	req_eqs = (dev->flags & MLX4_FLAG_MSI_X) ? num_online_cpus() : 1;
+	while (req_eqs) {
+		err = mlx4_create_eq(
+			dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+			(dev->flags & MLX4_FLAG_MSI_X) ?
+			(MLX4_EQ_COMP_CPU0 + priv->eq_table.num_comp_eqs) : 0,
+			&priv->eq_table.eq[MLX4_EQ_COMP_CPU0 +
+			priv->eq_table.num_comp_eqs]);
+		if (err)
+			goto err_out_comp;
+
+		priv->eq_table.num_comp_eqs++;
+		req_eqs--;
+	}
+	priv->eq_table.last_comp_eq = 0;

 	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
 			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
@@ -587,11 +599,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)

 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		static const char *eq_name[] = {
-			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
+			[MLX4_EQ_COMP_CPU0...MLX4_NUM_EQ] = "comp_" DRV_NAME,
 			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
 		};

-		for (i = 0; i < MLX4_NUM_EQ; ++i) {
+		for (i = 0; i < MLX4_EQ_COMP_CPU0 +
+		      priv->eq_table.num_comp_eqs; ++i) {
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt,
 					  0, eq_name[i], priv->eq_table.eq + i);
@@ -616,7 +629,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
 			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < MLX4_EQ_COMP_CPU0 + priv->eq_table.num_comp_eqs; ++i)
 		eq_set_ci(&priv->eq_table.eq[i], 1);

 	return 0;
@@ -625,9 +638,9 @@ err_out_async:
 	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

 err_out_comp:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
+	for (i = 0; i < priv->eq_table.num_comp_eqs; ++i)
+		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP_CPU0 + i]);

-err_out_unmap:
 	mlx4_unmap_clr_int(dev);
 	mlx4_free_irqs(dev);

@@ -646,7 +659,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)

 	mlx4_free_irqs(dev);

-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < MLX4_EQ_COMP_CPU0 + priv->eq_table.num_comp_eqs; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

 	mlx4_unmap_clr_int(dev);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index e3fd4e9..aecb1f2 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -922,22 +922,24 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct msix_entry entries[MLX4_NUM_EQ];
+	int needed_vectors = MLX4_EQ_COMP_CPU0 + num_online_cpus();
 	int err;
 	int i;

 	if (msi_x) {
-		for (i = 0; i < MLX4_NUM_EQ; ++i)
+		for (i = 0; i < needed_vectors; ++i)
 			entries[i].entry = i;

-		err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
+		err = pci_enable_msix(dev->pdev, entries, needed_vectors);
 		if (err) {
 			if (err > 0)
-				mlx4_info(dev, "Only %d MSI-X vectors available, "
-					  "not using MSI-X\n", err);
+				mlx4_info(dev, "Only %d MSI-X vectors "
+					  "available, need %d. Not using MSI-X\n",
+					  err, needed_vectors);
 			goto no_msi;
 		}

-		for (i = 0; i < MLX4_NUM_EQ; ++i)
+		for (i = 0; i < needed_vectors; ++i)
 			priv->eq_table.eq[i].irq = entries[i].vector;

 		dev->flags |= MLX4_FLAG_MSI_X;
@@ -945,7 +947,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 	}

 no_msi:
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < needed_vectors; ++i)
 		priv->eq_table.eq[i].irq = dev->pdev->irq;
 }

diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index eff1c5a..2201a99 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -64,8 +64,8 @@ enum {

 enum {
 	MLX4_EQ_ASYNC,
-	MLX4_EQ_COMP,
-	MLX4_NUM_EQ
+	MLX4_EQ_COMP_CPU0,
+	MLX4_NUM_EQ = MLX4_EQ_COMP_CPU0 + NR_CPUS
 };

 enum {
@@ -211,6 +211,8 @@ struct mlx4_eq_table {
 	void __iomem	       *uar_map[(MLX4_NUM_EQ + 6) / 4];
 	u32			clr_mask;
 	struct mlx4_eq		eq[MLX4_NUM_EQ];
+	int			num_comp_eqs;
+	int			last_comp_eq;
 	u64			icm_virt;
 	struct page	       *icm_page;
 	dma_addr_t		icm_dma;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 93c17aa..673462c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -312,6 +312,7 @@ struct mlx4_cq {
 	int			arm_sn;

 	int			cqn;
+	int			comp_eq_idx;

 	atomic_t		refcount;
 	struct completion	free;
@@ -441,7 +442,7 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,

 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-		  int collapsed);
+		  unsigned vector, int collapsed);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);

 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
-- 
1.5.4
