[openib-general] [PATCH] (fixed) cqe lookup speedup

Michael S. Tsirkin mst at mellanox.co.il
Wed Jan 26 06:52:18 PST 2005


Sorry, here's a fixed patch (the previous one introduced a compiler
warning).

-----

The following patch should get us a speedup in cq polling, along the
lines of what I did for eq polling. It works fine, but I don't have
fast enough hardware to measure the actual effect.
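
In short, cqe_sw()/next_cqe_sw() now return the CQE pointer (or NULL)
instead of a boolean, so mthca_poll_one() only does one lookup per
completion. For illustration, here is a minimal standalone sketch of the
same pattern; the names in it (struct entry, struct queue, next_sw_entry,
poll_one and so on) are made up for the example and are not the mthca
definitions:

#include <stddef.h>
#include <stdint.h>

#define OWNER_HW 0x80	/* entry still owned by the hardware */

struct entry {
	uint8_t  owner;
	uint32_t payload;
};

struct queue {
	struct entry *entries;
	unsigned int  cons_index;
	unsigned int  mask;	/* number of entries minus one */
};

/* Old style: predicate only, so the caller has to look the entry up again. */
static inline int entry_ready(struct queue *q, unsigned int i)
{
	return !(q->entries[i & q->mask].owner & OWNER_HW);
}

/* New style: hand back the entry itself, or NULL if hardware still owns it. */
static inline struct entry *next_sw_entry(struct queue *q)
{
	struct entry *e = &q->entries[q->cons_index & q->mask];

	return (e->owner & OWNER_HW) ? NULL : e;
}

/* Hot path: a single lookup; the entry goes back to hardware once consumed. */
int poll_one(struct queue *q, uint32_t *out)
{
	struct entry *e = next_sw_entry(q);

	if (!e)
		return -1;	/* nothing ready, like -EAGAIN above */

	*out = e->payload;
	e->owner = OWNER_HW;	/* corresponds to set_cqe_hw(cqe) */
	q->cons_index++;
	return 0;
}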

Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>

Index: hw/mthca/mthca_cq.c
===================================================================
--- hw/mthca/mthca_cq.c	(revision 1653)
+++ hw/mthca/mthca_cq.c	(working copy)
@@ -147,20 +147,21 @@ static inline struct mthca_cqe *get_cqe(
 			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
 }
 
-static inline int cqe_sw(struct mthca_cq *cq, int i)
+static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
 {
-	return !(MTHCA_CQ_ENTRY_OWNER_HW &
-		 get_cqe(cq, i)->owner);
+	struct mthca_cqe *cqe;
+	cqe = get_cqe(cq, i);
+	return (MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner) ? NULL : cqe;
 }
 
-static inline int next_cqe_sw(struct mthca_cq *cq)
+static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
 {
 	return cqe_sw(cq, cq->cons_index);
 }
 
-static inline void set_cqe_hw(struct mthca_cq *cq, int entry)
+static inline void set_cqe_hw(struct mthca_cqe *cqe)
 {
-	get_cqe(cq, entry)->owner = MTHCA_CQ_ENTRY_OWNER_HW;
+	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
 }
 
 static inline void inc_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
@@ -388,7 +398,8 @@ static inline int mthca_poll_one(struct 
 	int free_cqe = 1;
 	int err = 0;
 
-	if (!next_cqe_sw(cq))
+	cqe = next_cqe_sw(cq);
+	if (!cqe)
 		return -EAGAIN;
 
 	/*
@@ -397,8 +408,6 @@ static inline int mthca_poll_one(struct 
 	 */
 	rmb();
 
-	cqe = get_cqe(cq, cq->cons_index);
-
 	if (0) {
 		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
 			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
@@ -511,8 +520,8 @@ static inline int mthca_poll_one(struct 
 	entry->status = IB_WC_SUCCESS;
 
  out:
-	if (free_cqe) {
-		set_cqe_hw(cq, cq->cons_index);
+	if (likely(free_cqe)) {
+		set_cqe_hw(cqe);
 		++(*freed);
 		cq->cons_index = (cq->cons_index + 1) & cq->ibcq.cqe;
 	}
@@ -657,7 +666,7 @@ int mthca_init_cq(struct mthca_dev *dev,
 	}
 
 	for (i = 0; i < nent; ++i)
-		set_cqe_hw(cq, i);
+		set_cqe_hw(get_cqe(cq, i));
 
 	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
 	if (cq->cqn == -1)
@@ -775,7 +784,7 @@ void mthca_free_cq(struct mthca_dev *dev
 		int j;
 
 		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
-		       cq->cqn, cq->cons_index, next_cqe_sw(cq));
+		       cq->cqn, cq->cons_index, next_cqe_sw(cq) ? 1 : 0);
 		for (j = 0; j < 16; ++j)
 			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
 	}
-- 
I don't speak for Mellanox


