[openib-general] [PATCH] mthca: move code from post send to post receive

Michael S. Tsirkin mst at mellanox.co.il
Wed Dec 13 03:49:16 PST 2006


Place SQ wrids first in the wrid buffer, eliminating an add operation
in the send datapath.

This keeps the binary size constant while moving code from post send to
post receive: post send is a latency-sensitive operation, while post
receive is done ahead of time, so it is not.  Additionally, a generic
ULP mixing sends and RDMA operations performs more post sends than post
receives (an RDMA operation has no matching post receive).

Signed-off-by: Jack Morgenstein <jackm at dev.mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>

---

While unlikely to give a large gain, this makes sense to me.
Please consider for 2.6.20.
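
To make the new layout explicit, here is a minimal sketch of the
indexing convention before and after the patch.  The two helper
functions are illustrative only (they are not part of the patch or the
driver); qp->wrid, sq.max and rq.max are as in mthca_qp.c:

	/*
	 * wrid[] holds sq.max + rq.max entries.
	 *
	 * Before: [ RQ: 0 .. rq.max-1 | SQ: rq.max .. rq.max+sq.max-1 ]
	 *   send:    wrid[ind + rq.max]   (extra add on the hot path)
	 *   receive: wrid[ind]
	 *
	 * After:  [ SQ: 0 .. sq.max-1 | RQ: sq.max .. sq.max+rq.max-1 ]
	 *   send:    wrid[ind]            (no add in post send)
	 *   receive: wrid[ind + sq.max]
	 */
	static inline u64 sq_wrid(struct mthca_qp *qp, int ind)
	{
		return qp->wrid[ind];
	}

	static inline u64 rq_wrid(struct mthca_qp *qp, int ind)
	{
		return qp->wrid[ind + qp->sq.max];
	}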

diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 149b369..433f9a8 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -537,8 +537,7 @@ static inline int mthca_poll_one(struct 
 		wq = &(*cur_qp)->sq;
 		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
 			     >> wq->wqe_shift);
-		entry->wr_id = (*cur_qp)->wrid[wqe_index +
-					       (*cur_qp)->rq.max];
+		entry->wr_id = (*cur_qp)->wrid[wqe_index];
 	} else if ((*cur_qp)->ibqp.srq) {
 		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
 		u32 wqe = be32_to_cpu(cqe->wqe);
@@ -558,7 +557,7 @@ static inline int mthca_poll_one(struct 
 		 */
 		if (unlikely(wqe_index < 0))
 			wqe_index = wq->max - 1;
-		entry->wr_id = (*cur_qp)->wrid[wqe_index];
+		entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->sq.max];
 	}
 
 	if (wq) {
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 6a7822e..9e6f715 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1690,7 +1690,7 @@ int mthca_tavor_post_send(struct ib_qp *
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
-		qp->wrid[ind + qp->rq.max] = wr->wr_id;
+		qp->wrid[ind] = wr->wr_id;
 
 		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
 			mthca_err(dev, "opcode invalid\n");
@@ -1810,7 +1810,7 @@ int mthca_tavor_post_receive(struct ib_q
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
-		qp->wrid[ind] = wr->wr_id;
+		qp->wrid[ind + qp->sq.max] = wr->wr_id;
 
 		((struct mthca_next_seg *) prev_wqe)->nda_op =
 			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
@@ -2068,7 +2068,7 @@ int mthca_arbel_post_send(struct ib_qp *
 			size += sizeof (struct mthca_data_seg) / 16;
 		}
 
-		qp->wrid[ind + qp->rq.max] = wr->wr_id;
+		qp->wrid[ind] = wr->wr_id;
 
 		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
 			mthca_err(dev, "opcode invalid\n");
@@ -2192,7 +2192,7 @@ int mthca_arbel_post_receive(struct ib_q
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		qp->wrid[ind] = wr->wr_id;
+		qp->wrid[ind + qp->sq.max] = wr->wr_id;
 
 		++ind;
 		if (unlikely(ind >= qp->rq.max))


-- 
MST



