[ofa-general][PATCH] mlx4_ib: Multi Protocol support

Yevgeny Petrilin yevgenyp at mellanox.co.il
Wed Apr 16 01:05:45 PDT 2008


Multi Protocol support allows the user to run the InfiniBand and Ethernet
protocols on the same HCA (separately or at the same time).

Main changes to mlx4_ib:
         1.  The mlx4_ib driver queries the low-level driver for the number of IB ports.
         2.  QPs are reserved before being allocated (see the sketch below).
         3.  The CQ allocation API changed; mlx4_cq_alloc() takes two additional arguments.
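
For clarity, here is a minimal sketch (not part of the patch) of the
reserve-then-allocate flow that create_qp_common() now follows for
regular, non-special QPs. The wrapper name is made up for illustration
and error handling is abbreviated; the mlx4_qp_* calls are the ones
used in the diff below.

	static int example_alloc_qpn(struct mlx4_dev *dev, struct mlx4_qp *mqp)
	{
		int qpn = 0;
		int err;

		/* Reserve a single QP number from the low-level driver. */
		err = mlx4_qp_reserve_range(dev, 1, 1, &qpn);
		if (err)
			return err;

		/* Allocate the QP against the reserved number. */
		err = mlx4_qp_alloc(dev, qpn, mqp);
		if (err)
			/* On failure, give the reserved number back. */
			mlx4_qp_release_range(dev, qpn, 1);

		return err;
	}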

Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
Reviewed-by: Eli Cohen <eli at mellanox.co.il>
---
  drivers/infiniband/hw/mlx4/cq.c      |    2 +-
  drivers/infiniband/hw/mlx4/mad.c     |    6 +++---
  drivers/infiniband/hw/mlx4/main.c    |   15 ++++++++++++---
  drivers/infiniband/hw/mlx4/mlx4_ib.h |    2 ++
  drivers/infiniband/hw/mlx4/qp.c      |    9 +++++++++
  5 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7e..912b35c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -221,7 +221,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
  	}

  	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-			    cq->db.dma, &cq->mcq);
+			    cq->db.dma, &cq->mcq, vector, 0);
  	if (err)
  		goto err_dbmap;

diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4c1e72f..d91ba56 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -297,7 +297,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
  	int p, q;
  	int ret;

-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
  		for (q = 0; q <= 1; ++q) {
  			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
  						      q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -313,7 +313,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
  	return 0;

  err:
-	for (p = 0; p < dev->dev->caps.num_ports; ++p)
+	for (p = 0; p < dev->num_ports; ++p)
  		for (q = 0; q <= 1; ++q)
  			if (dev->send_agent[p][q])
  				ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -326,7 +326,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
  	struct ib_mad_agent *agent;
  	int p, q;

-	for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+	for (p = 0; p < dev->num_ports; ++p) {
  		for (q = 0; q <= 1; ++q) {
  			agent = dev->send_agent[p][q];
  			dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 136c76c..fd0b8c0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -112,7 +112,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,

  	props->max_mr_size	   = ~0ull;
  	props->page_size_cap	   = dev->dev->caps.page_size_cap;
-	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
+	props->max_qp 		   = dev->dev->caps.num_qps -
+				     dev->dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
  	props->max_qp_wr	   = dev->dev->caps.max_wqes;
  	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
  					 dev->dev->caps.max_rq_sg);
@@ -552,11 +553,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
  	mutex_init(&ibdev->pgdir_mutex);

  	ibdev->dev = dev;
+	ibdev->ports_map = mlx4_get_ports_of_type(dev, MLX4_PORT_TYPE_IB);

  	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
  	ibdev->ib_dev.owner		= THIS_MODULE;
  	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
-	ibdev->ib_dev.phys_port_cnt	= dev->caps.num_ports;
+	ibdev->num_ports = 0;
+	foreach_port(i, ibdev->ports_map)
+		ibdev->num_ports++;
+	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
  	ibdev->ib_dev.num_comp_vectors	= 1;
  	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

@@ -670,7 +675,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
  	struct mlx4_ib_dev *ibdev = ibdev_ptr;
  	int p;

-	for (p = 1; p <= dev->caps.num_ports; ++p)
+	for (p = 1; p <= ibdev->num_ports; ++p)
  		mlx4_CLOSE_PORT(dev, p);

  	mlx4_ib_mad_cleanup(ibdev);
@@ -685,6 +690,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
  			  enum mlx4_dev_event event, int port)
  {
  	struct ib_event ibev;
+	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+	if (port > ibdev->num_ports)
+		return;

  	switch (event) {
  	case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e63732..7a8111c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -173,6 +173,8 @@ struct mlx4_ib_ah {
  struct mlx4_ib_dev {
  	struct ib_device	ib_dev;
  	struct mlx4_dev	       *dev;
+	u32		 	ports_map;
+	int			num_ports;
  	void __iomem	       *uar_map;

  	struct list_head	pgdir_list;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae..59f7284 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -544,6 +544,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
  		}
  	}

+	if (!sqpn)
+		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &sqpn);
+	if (err)
+		goto err_wrid;
+
  	err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
  	if (err)
  		goto err_wrid;
@@ -654,6 +659,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
  	mlx4_ib_unlock_cqs(send_cq, recv_cq);

  	mlx4_qp_free(dev->dev, &qp->mqp);
+
+	if (!is_sqp(dev, qp))
+		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
  	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

  	if (is_user) {
-- 
1.5.4