[openib-general] [Patch ofed1.2 3/3] libehca: cleanup and adjust mmap
Stefan Roscher
ossrosch at linux.vnet.ibm.com
Fri Jan 26 08:48:32 PST 2007
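The ehca_ureqs.c part of this cleanup converts the old EDEB_* trace macros to
ehca_dbg()/ehca_err()/ehca_gen_*() calls, with the hex dumps gated by
libehca_trace_on; replaces the EHCA_CHECK_* pointer-check macros with explicit
NULL checks that return -EFAULT; adds an lwsync() barrier so CQE payload loads
cannot be reordered ahead of the validity check; and applies the usual style
fixes (header include order, unparenthesized returns, dropped redundant NULL
initializers, kernel-style block comments).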
Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
---
diff -Nurp libehca_old/src/ehca_ureqs.c libehca_new/src/ehca_ureqs.c
--- libehca_old/src/ehca_ureqs.c 2007-01-26 14:27:03.000000000 +0100
+++ libehca_new/src/ehca_ureqs.c 2007-01-26 14:27:43.000000000 +0100
@@ -38,25 +38,20 @@
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id: ehca_ureqs.c,v 1.5 2006/03/26 22:26:54 nguyen Exp $
*/
-#define DEB_PREFIX "reqs"
-
-#include "ehca_uclasses.h"
+#include <errno.h>
+#include <unistd.h>
+#include <netinet/in.h>
#include <infiniband/verbs.h>
+#include "ehca_uclasses.h"
#include "ehca_utools.h"
#include "hipz_fns_core.h"
#include "ehca_everbs.h"
#include "ehca_asm.h"
#include "ipzu_pt_fn.h"
-#include <errno.h>
-#include <unistd.h>
-#include <netinet/in.h>
-
static inline int write_rwqe(struct ipzu_queue *ipz_rqueue,
struct ehca_wqe *wqe_p,
struct ibv_recv_wr *recv_wr)
@@ -64,34 +59,35 @@ static inline int write_rwqe(struct ipzu
u8 cnt_ds;
if (unlikely((recv_wr->num_sge < 0) ||
(recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
- EDEB_ERR(4, "Invalid number of WQE SGE. "
- "num_sqe=%x max_nr_of_sg=%x",
- recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
- return (-EINVAL); /* invalid SG list length */
+ ehca_gen_err("Invalid number of WQE SGE. "
+ "num_sqe=%x max_nr_of_sg=%x",
+ recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
+ return -EINVAL; /* invalid SG list length */
}
clear_cacheline(wqe_p);
- clear_cacheline((u8 *) wqe_p + 32);
- clear_cacheline((u8 *) wqe_p + 64);
+ clear_cacheline((u8*)wqe_p + 32);
+ clear_cacheline((u8*)wqe_p + 64);
wqe_p->work_request_id = be64_to_cpu(recv_wr->wr_id);
wqe_p->nr_of_data_seg = recv_wr->num_sge;
for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
- be64_to_cpu(recv_wr->sg_list[cnt_ds].addr);
+ be64_to_cpu(recv_wr->sg_list[cnt_ds].addr);
wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
- ntohl(recv_wr->sg_list[cnt_ds].lkey);
+ ntohl(recv_wr->sg_list[cnt_ds].lkey);
wqe_p->u.all_rcv.sg_list[cnt_ds].length =
- ntohl(recv_wr->sg_list[cnt_ds].length);
+ ntohl(recv_wr->sg_list[cnt_ds].length);
}
- if (IS_EDEB_ON(7)) {
- EDEB(7, "RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
- EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+ if (unlikely(libehca_trace_on)) {
+ ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
+ ipz_rqueue);
+ ehca_dmp_dbg(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}
- return (0);
+ return 0;
}
static inline int write_swqe(struct ehcau_qp *qp,
@@ -100,18 +96,18 @@ static inline int write_swqe(struct ehca
{
u32 idx;
u64 dma_length;
- struct ehcau_av *my_av = NULL;
+ struct ehcau_av *my_av;
u32 remote_qkey = send_wr->wr.ud.remote_qkey;
clear_cacheline(wqe_p);
- clear_cacheline((u8 *) wqe_p + 32);
+ clear_cacheline((u8 *)wqe_p + 32);
if (unlikely((send_wr->num_sge < 0) ||
(send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
- EDEB_ERR(4, "Invalid number of WQE SGE. "
- "num_sqe=%x max_nr_of_sg=%x",
- send_wr->num_sge, qp->ipz_rqueue.act_nr_of_sg);
- return (-EINVAL); /* invalid SG list length */
+ ehca_gen_err("Invalid number of WQE SGE. "
+ "num_sqe=%x max_nr_of_sg=%x",
+ send_wr->num_sge, qp->ipz_rqueue.act_nr_of_sg);
+ return -EINVAL; /* invalid SG list length */
}
wqe_p->work_request_id = be64_to_cpu(send_wr->wr_id);
@@ -129,16 +125,16 @@ static inline int write_swqe(struct ehca
wqe_p->optype = WQE_OPTYPE_RDMAREAD;
break;
default:
- EDEB_ERR(4, "Invalid opcode=%x", send_wr->opcode);
- return (-EINVAL); /* invalid opcode */
+ ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
+ return -EINVAL; /* invalid opcode */
}
wqe_p->wqef = (send_wr->opcode) & 0xF0;
wqe_p->wr_flag = 0;
- if (send_wr->send_flags & IBV_SEND_SIGNALED) {
+ if (send_wr->send_flags & IBV_SEND_SIGNALED)
wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
- }
+
if (send_wr->opcode == IBV_WR_SEND_WITH_IMM ||
send_wr->opcode == IBV_WR_RDMA_WRITE_WITH_IMM) {
@@ -152,71 +148,70 @@ static inline int write_swqe(struct ehca
switch (qp->qp_type) {
case IBV_QPT_UD:
/* IB 1.2 spec C10-15 compliance */
- if (send_wr->wr.ud.remote_qkey & 0x80000000) {
+ if (send_wr->wr.ud.remote_qkey & 0x80000000)
remote_qkey = qp->qkey;
- }
wqe_p->destination_qp_number =
- ntohl(send_wr->wr.ud.remote_qpn << 8);
+ ntohl(send_wr->wr.ud.remote_qpn << 8);
wqe_p->local_ee_context_qkey = ntohl(remote_qkey);
- if (send_wr->wr.ud.ah==NULL) {
- EDEB_ERR(4, "wr.ud.ah is NULL. qp=%p", qp);
- return (-EINVAL);
+ if (!send_wr->wr.ud.ah) {
+ ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
+ return -EINVAL;
}
my_av = container_of(send_wr->wr.ud.ah, struct ehcau_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
/* omitted check of IBV_SEND_INLINE
- since HW does not support it */
+ * since HW does not support it
+ */
for (idx = 0; idx < send_wr->num_sge; idx++) {
wqe_p->u.ud_av.sg_list[idx].vaddr =
- be64_to_cpu(send_wr->sg_list[idx].addr);
+ be64_to_cpu(send_wr->sg_list[idx].addr);
wqe_p->u.ud_av.sg_list[idx].lkey =
- ntohl(send_wr->sg_list[idx].lkey);
+ ntohl(send_wr->sg_list[idx].lkey);
wqe_p->u.ud_av.sg_list[idx].length =
- ntohl(send_wr->sg_list[idx].length);
+ ntohl(send_wr->sg_list[idx].length);
} /* eof for idx */
break;
case IBV_QPT_UC:
- if (send_wr->send_flags & IBV_SEND_FENCE) {
+ if (send_wr->send_flags & IBV_SEND_FENCE)
wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
- }
- /* no break is intential here */
+ /* no break is intentional here */
case IBV_QPT_RC:
/*@@TODO atomic???*/
wqe_p->u.nud.remote_virtual_adress =
- be64_to_cpu(send_wr->wr.rdma.remote_addr);
+ be64_to_cpu(send_wr->wr.rdma.remote_addr);
wqe_p->u.nud.rkey = ntohl(send_wr->wr.rdma.rkey);
/* omitted checking of IBV_SEND_INLINE
- since HW does not support it */
+ * since HW does not support it
+ */
dma_length = 0;
for (idx = 0; idx < send_wr->num_sge; idx++) {
wqe_p->u.nud.sg_list[idx].vaddr =
- be64_to_cpu(send_wr->sg_list[idx].addr);
+ be64_to_cpu(send_wr->sg_list[idx].addr);
wqe_p->u.nud.sg_list[idx].lkey =
- ntohl(send_wr->sg_list[idx].lkey);
+ ntohl(send_wr->sg_list[idx].lkey);
wqe_p->u.nud.sg_list[idx].length =
- ntohl(send_wr->sg_list[idx].length);
+ ntohl(send_wr->sg_list[idx].length);
dma_length += send_wr->sg_list[idx].length;
} /* eof idx */
wqe_p->u.nud.atomic_1st_op_dma_len = be64_to_cpu(dma_length);
-
break;
-
default:
- EDEB_ERR(4, "Invalid qptype=%x", qp->qp_type);
- return (-EINVAL);
+ ehca_gen_err("Invalid qptype=%x", qp->qp_type);
+ return -EINVAL;
}
- if (IS_EDEB_ON(7)) {
- EDEB(7, "SEND WQE written into queue qp=%p ", qp);
- EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
+ if (unlikely(libehca_trace_on)) {
+ ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
+ ehca_dmp_dbg(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg),
+ "send wqe");
}
- return (0);
+ return 0;
}
-/**
+/*
* map_ib_wc_status - convert cqe_status to ib_wc_status
*/
static inline void map_ib_wc_status(u32 cqe_status,
@@ -251,7 +246,8 @@ static inline void map_ib_wc_status(u32
switch ((cqe_status & 0x0000F800) >> 11) {
case 0x0:
/* PSN Sequence Error!
- couldn't find a matching VAPI status! */
+ * couldn't find a matching VAPI status!
+ */
*wc_status = IBV_WC_GENERAL_ERR;
break;
case 0x1:
@@ -297,44 +293,49 @@ static inline void map_ib_wc_status(u32
*wc_status = IBV_WC_FATAL_ERR;
}
- } else {
+ } else
*wc_status = IBV_WC_SUCCESS;
- }
}
int ehcau_post_send(struct ibv_qp *qp,
struct ibv_send_wr *send_wr,
struct ibv_send_wr **bad_send_wr)
{
- struct ehcau_qp *my_qp = NULL;
- struct ibv_send_wr *cur_send_wr = NULL;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehcau_qp *my_qp;
+ struct ibv_send_wr *cur_send_wr;
+ struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int retcode = 0;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_ADR(send_wr);
- EDEB_EN(7, "ehcau_qp=%p qp_num=%x send_wr=%p bad_send_wr=%p",
- my_qp, qp->qp_num, send_wr, bad_send_wr);
+ if (!send_wr) {
+ ehca_gen_err("send_wr=%p check failed line %i",
+ send_wr, __LINE__);
+ return -EFAULT;
+ }
+ ehca_dbg(qp->context->device, "ehcau_qp=%p qp_num=%x send_wr=%p "
+ "bad_send_wr=%p", my_qp, qp->qp_num, send_wr, bad_send_wr);
/* LOCK the QUEUE */
ehcau_lock(&my_qp->lockvar_s);
/* loop processes list of send reqs */
- for (cur_send_wr = send_wr; cur_send_wr != NULL;
+ for (cur_send_wr = send_wr; cur_send_wr;
cur_send_wr = cur_send_wr->next) {
void *start_addr = my_qp->ipz_squeue.current_q_addr;
/* get pointer next to free WQE */
wqe_p = ipzu_qeit_get_inc(&my_qp->ipz_squeue);
- if (unlikely(wqe_p == NULL)) {
+ if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_send_wr != NULL) {
+ if (bad_send_wr)
*bad_send_wr = cur_send_wr;
- }
- if (wqe_cnt==0) {
+ if (!wqe_cnt) {
retcode = -ENOMEM;
- EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
+ ehca_err(qp->context->device,
+ "Too many posted WQEs qp_num=%x",
qp->qp_num);
}
goto post_send_exit0;
@@ -342,20 +343,22 @@ int ehcau_post_send(struct ibv_qp *qp,
/* write a SEND WQE into the QUEUE */
retcode = write_swqe(my_qp, wqe_p, cur_send_wr);
/* if something failed, reset the
- free entry pointer to the start value */
- if (unlikely(retcode != 0)) {
+ * free entry pointer to the start value
+ */
+ if (unlikely(retcode)) {
my_qp->ipz_squeue.current_q_addr = start_addr;
*bad_send_wr = cur_send_wr;
- if (wqe_cnt==0) {
+ if (!wqe_cnt) {
retcode = -EINVAL;
- EDEB_ERR(4, "Could not write WQE qp_num=%x",
+ ehca_err(qp->context->device,
+ "Could not write WQE qp_num=%x",
qp->qp_num);
}
goto post_send_exit0;
}
wqe_cnt++;
- EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_send_wr */
post_send_exit0:
@@ -363,8 +366,8 @@ int ehcau_post_send(struct ibv_qp *qp,
ehcau_unlock(&my_qp->lockvar_s);
asm_sync_mem(); /* serialize GAL register access */
hipz_update_SQA(my_qp, wqe_cnt);
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
- my_qp, qp->qp_num, retcode, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, retcode, wqe_cnt);
return retcode;
}
@@ -372,36 +375,43 @@ int ehcau_post_recv(struct ibv_qp *qp,
struct ibv_recv_wr *recv_wr,
struct ibv_recv_wr **bad_recv_wr)
{
- struct ehcau_qp *my_qp = NULL;
- struct ibv_recv_wr *cur_recv_wr = NULL;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehcau_qp *my_qp;
+ struct ibv_recv_wr *cur_recv_wr;
+ struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int retcode = 0;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_ADR(recv_wr);
- EDEB_EN(7, "ehca_qp=%p qp_num=%x recv_wr=%p bad_recv_wr=%p",
- my_qp, qp->qp_num, recv_wr, bad_recv_wr);
+ if (!recv_wr) {
+ ehca_gen_err("recv_wr=%p check failed line %i",
+ recv_wr, __LINE__);
+ return -EFAULT;
+ }
+ ehca_dbg(qp->context->device,
+ "ehca_qp=%p qp_num=%x recv_wr=%p bad_recv_wr=%p",
+ my_qp, qp->qp_num, recv_wr, bad_recv_wr);
/* LOCK the QUEUE */
ehcau_lock(&my_qp->lockvar_r);
/* loop processes list of send reqs */
- for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
+ for (cur_recv_wr = recv_wr; cur_recv_wr;
cur_recv_wr = cur_recv_wr->next) {
void *start_addr = my_qp->ipz_rqueue.current_q_addr;
/* get pointer next to free WQE */
wqe_p = ipzu_qeit_get_inc(&my_qp->ipz_rqueue);
- if (unlikely(wqe_p == NULL)) {
+ if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_recv_wr != NULL) {
+ if (bad_recv_wr)
*bad_recv_wr = cur_recv_wr;
- }
- if (wqe_cnt==0) {
+ if (!wqe_cnt) {
retcode = -ENOMEM;
- EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
+ ehca_err(qp->context->device,
+ "Too many posted WQEs qp_num=%x",
qp->qp_num);
}
goto post_recv_exit0;
@@ -409,20 +419,22 @@ int ehcau_post_recv(struct ibv_qp *qp,
/* write a RECV WQE into the QUEUE */
retcode = write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
/* if something failed, reset the
- free entry pointer to the start value */
- if (unlikely(retcode != 0)) {
+ * free entry pointer to the start value
+ */
+ if (unlikely(retcode)) {
my_qp->ipz_rqueue.current_q_addr = start_addr;
*bad_recv_wr = cur_recv_wr;
- if (wqe_cnt==0) {
+ if (!wqe_cnt) {
retcode = -EINVAL;
- EDEB_ERR(4, "Could not write WQE qp_num=%x",
+ ehca_err(qp->context->device,
+ "Could not write WQE qp_num=%x",
qp->qp_num);
}
goto post_recv_exit0;
}
wqe_cnt++;
- EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
post_recv_exit0:
@@ -430,98 +442,109 @@ int ehcau_post_recv(struct ibv_qp *qp,
ehcau_unlock(&my_qp->lockvar_r);
asm_sync_mem(); /* serialize GAL register access */
hipz_update_RQA(my_qp, wqe_cnt);
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
- my_qp, qp->qp_num, retcode, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, retcode, wqe_cnt);
return retcode;
}
-/**
+/*
* Table converts ehca wc opcode to ib
* Since we use zero to indicate invalid opcode, the actual ib opcode must
* be decremented!!!
*/
static const u8 ib_wc_opcode[255] = {
- [0x00] = 1, /* for daqp optype is always zero */
- [0x01] = IBV_WC_RECV+1,
- [0x02] = IBV_WC_RECV_RDMA_WITH_IMM+1,
- [0x04] = IBV_WC_BIND_MW+1,
- [0x08] = IBV_WC_FETCH_ADD+1,
- [0x10] = IBV_WC_COMP_SWAP+1,
- [0x20] = IBV_WC_RDMA_WRITE+1,
- [0x40] = IBV_WC_RDMA_READ+1,
- [0x80] = IBV_WC_SEND+1
+ [0x00] = 1, /* for daqp optype is always zero */
+ [0x01] = IBV_WC_RECV + 1,
+ [0x02] = IBV_WC_RECV_RDMA_WITH_IMM + 1,
+ [0x04] = IBV_WC_BIND_MW + 1,
+ [0x08] = IBV_WC_FETCH_ADD + 1,
+ [0x10] = IBV_WC_COMP_SWAP + 1,
+ [0x20] = IBV_WC_RDMA_WRITE + 1,
+ [0x40] = IBV_WC_RDMA_READ + 1,
+ [0x80] = IBV_WC_SEND + 1
};
-/** @brief internal function to poll one entry of cq
- */
+/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ibv_cq *cq, struct ibv_wc *wc)
{
int retcode = 0;
struct ehcau_cq *my_cq = container_of(cq, struct ehcau_cq, ib_cq);
- struct ehca_cqe *cqe = NULL;
+ struct ehca_cqe *cqe;
int cqe_count = 0;
- EDEB_EN(7, "ehca_cq=%p cq_num=%x wc=%p", my_cq, my_cq->cq_number, wc);
+ ehca_dbg(cq->context->device, "ehca_cq=%p cq_num=%x wc=%p",
+ my_cq, my_cq->cq_number, wc);
- poll_cq_one_read_cqe:
+poll_cq_one_read_cqe:
cqe = (struct ehca_cqe *)ipzu_qeit_get_inc_valid(&my_cq->ipz_queue);
- if (cqe == NULL) {
+ if (!cqe) {
retcode = -EAGAIN;
- EDEB(7, "Completion queue is empty ehca_cq=%p cq_num=%x "
- "retcode=%x", my_cq, my_cq->cq_number, retcode);
+ ehca_dbg(cq->context->device,
+ "Completion queue is empty ehca_cq=%p cq_num=%x "
+ "retcode=%x", my_cq, my_cq->cq_number, retcode);
goto poll_cq_one_exit0;
}
+
+ /* prevents loads being reordered across this point */
+ lwsync();
+
cqe_count++;
if (unlikely(cqe->status & 0x10)) { /* purge bit set */
- struct ehcau_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
- int purgeflag = 0;
- if (qp==NULL) { /* should not happen */
- EDEB_ERR(4, "cq_num=%x qp_num=%x "
+ struct ehcau_qp *qp = ehca_cq_get_qp(my_cq,
+ cqe->local_qp_number);
+ int purgeflag;
+ if (!qp) { /* should not happen */
+ ehca_err(cq->context->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
my_cq->cq_number, cqe->local_qp_number);
- EDEB_DMP(4, cqe, 64, "cq_num=%x qp_num=%x",
- my_cq->cq_number, cqe->local_qp_number);
+ ehca_dmp_err(cqe, 64, "cq_num=%x qp_num=%x",
+ my_cq->cq_number, cqe->local_qp_number);
/* ignore this purged cqe */
goto poll_cq_one_read_cqe;
}
ehcau_lock(&qp->lockvar_s);
purgeflag = qp->sqerr_purgeflag;
ehcau_unlock(&qp->lockvar_s);
- if (purgeflag!=0) {
- EDEB(6, "Got CQE with purged bit qp_num=%x src_qp=%x",
- cqe->local_qp_number, cqe->remote_qp_number);
- EDEB_DMP(6, cqe, 64, "qp_num=%x src_qp=%x",
+ if (purgeflag) {
+ ehca_dbg(cq->context->device,
+ "Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
+ ehca_dmp_dbg(cqe, 64, "qp_num=%x src_qp=%x",
+ cqe->local_qp_number,
+ cqe->remote_qp_number);
/* ignore this to avoid double cqes of bad wqe
- that caused sqe and turn off purge flag */
+ * that caused sqe and turn off purge flag
+ */
qp->sqerr_purgeflag = 0;
goto poll_cq_one_read_cqe;
}
}
/* tracing cqe */
- if (IS_EDEB_ON(7)) {
- EDEB(7, "Received COMPLETION ehca_cq=%p cq_num=%x -----",
- my_cq, my_cq->cq_number);
- EDEB_DMP(7, cqe, 64, "ehca_cq=%p cq_num=%x",
+ if (unlikely(libehca_trace_on)) {
+ ehca_dbg(cq->context->device,
+ "Received COMPLETION ehca_cq=%p cq_num=%x -----",
+ my_cq, my_cq->cq_number);
+ ehca_dmp_dbg(cqe, 64, "ehca_cq=%p cq_num=%x",
+ my_cq, my_cq->cq_number);
+ ehca_dbg(cq->context->device,
+ "ehca_cq=%p cq_num=%x -------------------------",
my_cq, my_cq->cq_number);
- EDEB(7, "ehca_cq=%p cq_num=%x -------------------------",
- my_cq, my_cq->cq_number);
}
/* we got a completion! */
wc->wr_id = cqe->work_request_id;
/* eval ib_wc_opcode */
- wc->opcode = ib_wc_opcode[cqe->optype]-1;
+ wc->opcode = ib_wc_opcode[cqe->optype] - 1;
if (unlikely(wc->opcode == -1)) {
/* no error code required, but do trace out */
- EDEB_ERR(4, "Invalid cqe->OPType=%x cqe->status=%x "
- "ehca_cq=%p cq_num=%x",
+ ehca_err(cq->context->device, "Invalid cqe->OPType=%x "
+ "cqe->status=%x ehca_cq=%p cq_num=%x",
cqe->optype, cqe->status, my_cq, my_cq->cq_number);
/* dump cqe for other infos */
- EDEB_DMP(4, cqe, 64, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
+ ehca_dmp_err(cqe, 64, "ehca_cq=%p cq_num=%x",
+ my_cq, my_cq->cq_number);
/* update also queue adder to throw away this entry!!! */
goto poll_cq_one_exit0;
}
@@ -530,9 +553,8 @@ static inline int ehca_poll_cq_one(struc
if (unlikely(cqe->status & 0x80000000)) { /* complete with errors */
map_ib_wc_status(cqe->status, &wc->status);
wc->vendor_err = wc->status;
- } else {
+ } else
wc->status = IBV_WC_SUCCESS;
- }
wc->qp_num = cqe->local_qp_number;
wc->byte_len = cqe->nr_bytes_transferred;
@@ -544,191 +566,224 @@ static inline int ehca_poll_cq_one(struc
wc->imm_data = cqe->immediate_data;
wc->sl = cqe->service_level;
- poll_cq_one_exit0:
- if (cqe_count>0) {
+poll_cq_one_exit0:
+ if (cqe_count > 0)
hipz_update_FECA(my_cq, cqe_count);
- }
- EDEB_EX(7, "retcode=%x ehca_cq=%p cq_number=%x wc=%p "
- "status=%x opcode=%x qp_num=%x byte_len=%x",
- retcode, my_cq, my_cq->cq_number, wc, wc->status,
- wc->opcode, wc->qp_num, wc->byte_len);
- return (retcode);
+ ehca_dbg(cq->context->device, "retcode=%x ehca_cq=%p cq_number=%x "
+ "wc=%p status=%x opcode=%x qp_num=%x byte_len=%x",
+ retcode, my_cq, my_cq->cq_number, wc, wc->status,
+ wc->opcode, wc->qp_num, wc->byte_len);
+ return retcode;
}
int ehcau_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
{
- EHCA_CHECK_CQ(cq);
- EHCA_CHECK_ADR(wc);
+ if (!cq) {
+ ehca_gen_err("cq=%p check failed line %i", cq, __LINE__);
+ return -EFAULT;
+ }
+ if (!wc) {
+ ehca_gen_err("wc=%p check failed line %i", wc, __LINE__);
+ return -EFAULT;
+ }
int retcode = 0;
struct ehcau_cq *my_cq = container_of(cq, struct ehcau_cq, ib_cq);
- EHCA_CHECK_CQ(my_cq);
if (num_entries < 1) {
- EDEB_ERR(4, "ehcau_cq=%p, invalid num_entries=%d",
- my_cq, num_entries);
+ ehca_err(cq->context->device, "ehcau_cq=%p, "
+ "invalid num_entries=%d", my_cq, num_entries);
return -EINVAL;
}
- EDEB_EN(7, "ehcau_cq=%p num_entries=%d wc=%p", my_cq, num_entries, wc);
+ ehca_dbg(cq->context->device, "ehcau_cq=%p num_entries=%d wc=%p",
+ my_cq, num_entries, wc);
int nr = 0;
struct ibv_wc *current_wc = wc;
ehcau_lock(&my_cq->lockvar);
for (nr = 0; nr < num_entries; nr++) {
retcode = ehca_poll_cq_one(cq, current_wc);
- if (0 != retcode) {
+ if (retcode)
break;
- }
current_wc++;
} /* eof for nr */
ehcau_unlock(&my_cq->lockvar);
- if (-EAGAIN == retcode || 0 == retcode) {
+ if (retcode == -EAGAIN || !retcode)
retcode = nr;
- }
- EDEB_EX(7, "retcode=%x cq_number=%x wc=%p nr_entries=%d",
- retcode, my_cq->cq_number, wc, nr);
- return (retcode);
+ ehca_dbg(cq->context->device, "retcode=%x cq_number=%x wc=%p "
+ "nr_entries=%d", retcode, my_cq->cq_number, wc, nr);
+ return retcode;
}
int ehcau_req_notify_cq(struct ibv_cq *cq, int solicited)
{
- int retcode = 0;
- struct ehcau_cq *my_cq = NULL;
+ struct ehcau_cq *my_cq;
- EHCA_CHECK_CQ(cq);
+ if (!cq) {
+ ehca_gen_err("cq=%p check failed line %i", cq, __LINE__);
+ return -EFAULT;
+ }
my_cq = container_of(cq, struct ehcau_cq, ib_cq);
- EHCA_CHECK_CQ(my_cq);
- EDEB_EN(7, "ehcau_cq=%p solicited=%x", my_cq, solicited);
+ ehca_dbg(cq->context->device, "ehcau_cq=%p solicited=%x",
+ my_cq, solicited);
- if (solicited != 0) { /* IB_CQ_SOLICITED */
+ if (solicited) /* IB_CQ_SOLICITED */
hipz_set_CQx_N0(my_cq, 1);
- } else { /* IB_CQ_NEXT_COMP */
+ else /* IB_CQ_NEXT_COMP */
hipz_set_CQx_N1(my_cq, 1);
- }
- EDEB_EX(7, "ehcau_cq=%p retcode=%x", cq, retcode);
-
- return (retcode);
+ ehca_dbg(cq->context->device, "ehcau_cq=%p", cq);
+ return 0;
}
int ehcau_send_wr_trigger(struct ibv_qp *qp, int wqe_count)
{
- struct ehcau_qp *my_qp = NULL;
+ struct ehcau_qp *my_qp;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EDEB_EN(7, "ehca_qp=%p wqe_count=%x", my_qp, wqe_count);
+ ehca_dbg(qp->context->device, "ehca_qp=%p wqe_count=%x",
+ my_qp, wqe_count);
asm_sync_mem(); /* serialize GAL register access */
hipz_update_SQA(my_qp, wqe_count);
- EDEB_EX(7, "ehca_qp=%p wqe_count=%x", my_qp, wqe_count);
+ ehca_dbg(qp->context->device, "ehca_qp=%p wqe_count=%x",
+ my_qp, wqe_count);
return 0;
}
int ehcau_recv_wr_trigger(struct ibv_qp *qp, int wqe_count)
{
- struct ehcau_qp *my_qp = NULL;
+ struct ehcau_qp *my_qp;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EDEB_EN(7, "ehca_qp=%p wqe_count=%x", my_qp, wqe_count);
+ ehca_dbg(qp->context->device, "ehca_qp=%p wqe_count=%x",
+ my_qp, wqe_count);
asm_sync_mem(); /* serialize GAL register access */
hipz_update_RQA(my_qp, wqe_count);
- EDEB_EX(7, "ehca_qp=%p wqe_count=%x", my_qp, wqe_count);
+ ehca_dbg(qp->context->device, "ehca_qp=%p wqe_count=%x",
+ my_qp, wqe_count);
return 0;
}
int ehcau_write_swqe(void *wqe, struct ibv_qp *qp, struct ibv_send_wr *send_wr,
struct ibv_send_wr **bad_wr)
{
- struct ehcau_qp *my_qp = NULL;
- int retcode = 0;
- struct ibv_send_wr *cur_send_wr = NULL;
+ struct ehcau_qp *my_qp;
+ int retcode;
+ struct ibv_send_wr *cur_send_wr;
int wqe_cnt = 0;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehca_wqe *wqe_p;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_QP(wqe);
- EHCA_CHECK_ADR(send_wr);
+ if (!wqe) {
+ ehca_gen_err("wqe=%p check failed line %i", wqe, __LINE__);
+ return -EFAULT;
+ }
+ if (!send_wr) {
+ ehca_gen_err("send_wr=%p check failed line %i",
+ send_wr, __LINE__);
+ return -EFAULT;
+ }
- EDEB_EN(7, "ehcau_qp=%p wqe=%p send_wr=%p bad_wr=%p",
- my_qp, wqe, send_wr, bad_wr);
+ ehca_dbg(qp->context->device, "ehcau_qp=%p wqe=%p send_wr=%p bad_wr=%p",
+ my_qp, wqe, send_wr, bad_wr);
/* LOCK the QUEUE */
ehcau_lock(&my_qp->lockvar_s);
/* loop processes list of send reqs */
wqe_p = (struct ehca_wqe*)wqe;
- for (cur_send_wr = send_wr; cur_send_wr != NULL;
+ for (cur_send_wr = send_wr; cur_send_wr;
cur_send_wr = cur_send_wr->next) {
/* write a SEND WQE into the QUEUE */
retcode = write_swqe(my_qp, wqe_p, cur_send_wr);
/* if something failed, leave loop */
- if (unlikely(retcode != 0)) {
+ if (unlikely(retcode)) {
*bad_wr = cur_send_wr;
break;
}
wqe_cnt++;
wqe_p++;
- EDEB(7, "ehca_qp %p wqe_cnt %d", my_qp, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp %p wqe_cnt %d",
+ my_qp, wqe_cnt);
} /* eof for cur_send_wr */
retcode = wqe_cnt;
/* UNLOCK the QUEUE */
ehcau_unlock(&my_qp->lockvar_s);
- EDEB_EX(7, "ehca_qp=%p ret=%x wqe_cnt=%d", my_qp, retcode, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p ret=%x wqe_cnt=%d",
+ my_qp, retcode, wqe_cnt);
return retcode;
}
int ehcau_write_rwqe(void *wqe, struct ibv_qp *qp, struct ibv_recv_wr *recv_wr,
struct ibv_recv_wr **bad_wr)
{
- struct ehcau_qp *my_qp = NULL;
- int retcode = 0;
- struct ibv_recv_wr *cur_recv_wr = NULL;
+ struct ehcau_qp *my_qp;
+ int retcode;
+ struct ibv_recv_wr *cur_recv_wr;
int wqe_cnt = 0;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehca_wqe *wqe_p;
- EHCA_CHECK_ADR(qp);
+ if (!qp) {
+ ehca_gen_err("qp=%p check failed line %i", qp, __LINE__);
+ return -EFAULT;
+ }
my_qp = container_of(qp, struct ehcau_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_ADR(wqe);
- EHCA_CHECK_ADR(recv_wr);
+ if (!wqe) {
+ ehca_gen_err("wqe=%p check failed line %i", wqe, __LINE__);
+ return -EFAULT;
+ }
+ if (!recv_wr) {
+ ehca_gen_err("recv_wr=%p check failed line %i",
+ recv_wr, __LINE__);
+ return -EFAULT;
+ }
- EDEB_EN(7, "ehca_qp=%p wqe=%p recv_wr=%p bad_wr=%p",
- my_qp, wqe, recv_wr, bad_wr);
+ ehca_dbg(qp->context->device, "ehca_qp=%p wqe=%p recv_wr=%p bad_wr=%p",
+ my_qp, wqe, recv_wr, bad_wr);
/* LOCK the QUEUE */
ehcau_lock(&my_qp->lockvar_r);
/* loop processes list of send reqs */
wqe_p = (struct ehca_wqe*)wqe;
- for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
+ for (cur_recv_wr = recv_wr; cur_recv_wr;
cur_recv_wr = cur_recv_wr->next) {
/* write a RECV WQE into the QUEUE */
retcode = write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
/* if something failed, leave loop */
- if (unlikely(retcode != 0)) {
+ if (unlikely(retcode)) {
*bad_wr = cur_recv_wr;
break;
}
wqe_cnt++;
wqe_p++;
- EDEB(7, "ehca_qp %p wqe_cnt %d", my_qp, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp %p wqe_cnt %d",
+ my_qp, wqe_cnt);
} /* eof for cur_recv_wr */
retcode = wqe_cnt;
/* UNLOCK the QUEUE */
ehcau_unlock(&my_qp->lockvar_r);
- EDEB_EX(7, "ehca_qp=%p ret=%x wqe_cnt=%d", my_qp, retcode, wqe_cnt);
+ ehca_dbg(qp->context->device, "ehca_qp=%p ret=%x wqe_cnt=%d",
+ my_qp, retcode, wqe_cnt);
return retcode;
}
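The lwsync() inserted in ehca_poll_cq_one() is the one functional change in
this file: ipzu_qeit_get_inc_valid() checks the CQE's valid indicator, and on
PowerPC the subsequent payload loads could otherwise be speculated past that
check and return stale completion data. Below is a minimal sketch of the
pattern, assuming a PowerPC target; the struct cqe layout and poll_one() are
made up for illustration, and only the lwsync instruction and its placement
between the validity check and the payload reads mirror the patch (the real
queue-iterator helpers live in ipzu_pt_fn.h and hipz_fns_core.h):

#include <stdint.h>

/* hypothetical CQE layout, illustration only */
struct cqe {
	volatile uint32_t valid;	/* written last by the adapter */
	uint64_t wr_id;			/* completion payload */
};

/* PowerPC lightweight sync: orders earlier loads before later loads */
static inline void lwsync(void)
{
	__asm__ __volatile__("lwsync" : : : "memory");
}

static int poll_one(struct cqe *c, uint64_t *wr_id)
{
	if (!c->valid)
		return -1;	/* nothing completed yet */
	lwsync();		/* valid load must complete before payload loads */
	*wr_id = c->wr_id;	/* now safe to read the payload */
	return 0;
}

In Linux kernel terms this is the load-load half of a memory barrier; on
PowerPC smp_rmb() is implemented with the same lwsync instruction.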