[openib-general] [PATCH] 2/2 : ip over ib tx/rx split

Michael S. Tsirkin mst at mellanox.co.il
Thu Feb 3 10:02:33 PST 2005


I'm reposting this patch; it's now tested. Together with the first part
(the mthca send/receive queue lock split), I am getting about 2% more
bandwidth with this patch.
I also checked the interrupt rate, and it is about the same. This is not
surprising, since interrupts from both CQs are coalesced by the EQ polling
in mthca.

Next week I plan to implement avoiding CQ req_notify for the send CQ,
along the lines Ido suggested; this split is a prerequisite for that,
so I'll be very happy if at least this piece goes in. Without the mthca
piece, my experiments show this is a no pain, no gain patch: on its own
it neither helps nor hurts performance.
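
Roughly, the send-side polling would look something like the sketch below.
This is illustration only, not part of this patch: the function name
ipoib_reap_tx_cq, the batch size, and the ipoib_handle_tx_wc helper are
made up.

static void ipoib_reap_tx_cq(struct ipoib_dev_priv *priv)
{
	struct ib_wc wc[8];	/* arbitrary batch size, for illustration */
	int n, i;

	/*
	 * The send CQ is never armed with ib_req_notify_cq(), so it
	 * generates no interrupts; completions are reaped here whenever
	 * the TX path runs.
	 */
	do {
		n = ib_poll_cq(priv->tx_cq, 8, wc);
		for (i = 0; i < n; ++i)
			ipoib_handle_tx_wc(priv, &wc[i]);	/* hypothetical completion handler */
	} while (n == 8);
}

The obvious catch is that an idle TX path leaves send completions unreaped,
so the real implementation will need some fallback (reaping when the send
ring fills, for instance).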

Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>

Index: ulp/ipoib/ipoib_verbs.c
===================================================================
--- ulp/ipoib/ipoib_verbs.c	(revision 1725)
+++ ulp/ipoib/ipoib_verbs.c	(working copy)
@@ -175,24 +175,31 @@ int ipoib_transport_dev_init(struct net_
 		return -ENODEV;
 	}
 
-	priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
-				IPOIB_TX_RING_SIZE + IPOIB_RX_RING_SIZE + 1);
-	if (IS_ERR(priv->cq)) {
-		printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
+	priv->tx_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
+				IPOIB_TX_RING_SIZE);
+	if (IS_ERR(priv->tx_cq)) {
+		printk(KERN_WARNING "%s: failed to create TX CQ\n", ca->name);
 		goto out_free_pd;
 	}
 
-	if (ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP))
-		goto out_free_cq;
+	priv->rx_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
+				IPOIB_RX_RING_SIZE);
+	if (IS_ERR(priv->rx_cq)) {
+		printk(KERN_WARNING "%s: failed to create RX CQ\n", ca->name);
+		goto out_free_tx_cq;
+	}
+
+	if (ib_req_notify_cq(priv->rx_cq, IB_CQ_NEXT_COMP))
+		goto out_free_rx_cq;
 
 	priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(priv->mr)) {
 		printk(KERN_WARNING "%s: ib_get_dma_mr failed\n", ca->name);
-		goto out_free_cq;
+		goto out_free_rx_cq;
 	}
 
-	init_attr.send_cq = priv->cq;
-	init_attr.recv_cq = priv->cq,
+	init_attr.send_cq = priv->tx_cq;
+	init_attr.recv_cq = priv->rx_cq;
 
 	priv->qp = ib_create_qp(priv->pd, &init_attr);
 	if (IS_ERR(priv->qp)) {
@@ -216,8 +223,11 @@ int ipoib_transport_dev_init(struct net_
 out_free_mr:
 	ib_dereg_mr(priv->mr);
 
-out_free_cq:
-	ib_destroy_cq(priv->cq);
+out_free_rx_cq:
+	ib_destroy_cq(priv->rx_cq);
+
+out_free_tx_cq:
+	ib_destroy_cq(priv->tx_cq);
 
 out_free_pd:
 	ib_dealloc_pd(priv->pd);
@@ -239,8 +249,11 @@ void ipoib_transport_dev_cleanup(struct 
 	if (ib_dereg_mr(priv->mr))
 		ipoib_warn(priv, "ib_dereg_mr failed\n");
 
-	if (ib_destroy_cq(priv->cq))
-		ipoib_warn(priv, "ib_cq_destroy failed\n");
+	if (ib_destroy_cq(priv->rx_cq))
+		ipoib_warn(priv, "ib_cq_destroy for rx cq failed\n");
+
+	if (ib_destroy_cq(priv->tx_cq))
+		ipoib_warn(priv, "ib_cq_destroy for tx cq failed\n");
 
 	if (ib_dealloc_pd(priv->pd))
 		ipoib_warn(priv, "ib_dealloc_pd failed\n");
Index: ulp/ipoib/ipoib.h
===================================================================
--- ulp/ipoib/ipoib.h	(revision 1725)
+++ ulp/ipoib/ipoib.h	(working copy)
@@ -137,7 +137,8 @@ struct ipoib_dev_priv {
 	u16           	  pkey;
 	struct ib_pd  	 *pd;
 	struct ib_mr  	 *mr;
-	struct ib_cq  	 *cq;
+	struct ib_cq  	 *tx_cq;
+	struct ib_cq  	 *rx_cq;
 	struct ib_qp  	 *qp;
 	u32           	  qkey;
 

-- 
MST - Michael S. Tsirkin


