[openib-general] Re: [PATCH] libmthca: fix wqe post
Roland Dreier
rolandd at cisco.com
Tue Sep 13 11:16:27 PDT 2005
Viswanath> Once you generate a kernel patch, I can test out both
Viswanath> user and kernel mthca since I have the tests ready..
Excellent. I merged MST's patch and applied the patch below to the
kernel. (So you can either update from svn or apply the patches.)
Thanks for testing -- let me know if you still see problems.
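For what it's worth, the idea behind the change is just to seed the
"last" pointer with the final WQE in the ring instead of NULL, so the
previous-WQE chaining can be done unconditionally on every post.  Here
is a toy sketch of that pattern -- not the driver code itself, and names
like wqe_ring, NUM_WQES and post_recv() are made up for illustration:

#include <stdio.h>

#define NUM_WQES 4

struct fake_wqe {
	int next;	/* index of the WQE chained after this one */
	int posted;	/* nonzero once a request has been posted here */
};

static struct fake_wqe wqe_ring[NUM_WQES];
static struct fake_wqe *last;	/* most recently posted WQE */
static int first_free;

static void ring_init(void)
{
	first_free = 0;
	/*
	 * The fix in a nutshell: point "last" at the final slot rather
	 * than leaving it NULL, so the very first post already has a
	 * valid previous WQE to link from (the ring wraps around).
	 */
	last = &wqe_ring[NUM_WQES - 1];
}

static void post_recv(int ind)
{
	/* No "if (last)" check needed: last is always a valid slot. */
	last->next = ind;
	wqe_ring[ind].posted = 1;
	last = &wqe_ring[ind];
	first_free = (ind + 1) % NUM_WQES;
}

int main(void)
{
	ring_init();
	post_recv(first_free);	/* works even for the very first post */
	post_recv(first_free);
	printf("last posted index: %ld\n", (long)(last - wqe_ring));
	return 0;
}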
Index: infiniband/hw/mthca/mthca_srq.c
===================================================================
--- infiniband/hw/mthca/mthca_srq.c (revision 3404)
+++ infiniband/hw/mthca/mthca_srq.c (working copy)
@@ -189,7 +189,6 @@ int mthca_alloc_srq(struct mthca_dev *de
srq->max = attr->max_wr;
srq->max_gs = attr->max_sge;
- srq->last = NULL;
srq->counter = 0;
if (mthca_is_memfree(dev))
@@ -264,6 +263,7 @@ int mthca_alloc_srq(struct mthca_dev *de
srq->first_free = 0;
srq->last_free = srq->max - 1;
+ srq->last = get_wqe(srq, srq->max - 1);
return 0;
@@ -446,13 +446,11 @@ int mthca_tavor_post_srq_recv(struct ib_
((struct mthca_data_seg *) wqe)->addr = 0;
}
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << srq->wqe_shift) | 1);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << srq->wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD);
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
Index: infiniband/hw/mthca/mthca_qp.c
===================================================================
--- infiniband/hw/mthca/mthca_qp.c (revision 3404)
+++ infiniband/hw/mthca/mthca_qp.c (working copy)
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_w
wq->last_comp = wq->max - 1;
wq->head = 0;
wq->tail = 0;
- wq->last = NULL;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct
}
}
+ qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+ qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
return 0;
}
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *
goto out;
}
- if (prev_wqe) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32(((ind << qp->sq.wqe_shift) +
- qp->send_wqe_offset) |
- mthca_opcode[wr->opcode]);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32(((ind << qp->sq.wqe_shift) +
+ qp->send_wqe_offset) |
+ mthca_opcode[wr->opcode]);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
if (!size0) {
size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_q
qp->wrid[ind] = wr->wr_id;
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD | size);
if (!size0)
size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *
goto out;
}
- if (likely(prev_wqe)) {
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32(((ind << qp->sq.wqe_shift) +
- qp->send_wqe_offset) |
- mthca_opcode[wr->opcode]);
- wmb();
- ((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD | size);
- }
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32(((ind << qp->sq.wqe_shift) +
+ qp->send_wqe_offset) |
+ mthca_opcode[wr->opcode]);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD | size);
if (!size0) {
size0 = size;