[openib-general] [PATCH] ehca for OFED 1.1-rc4
Hoang-Nam Nguyen
HNGUYEN at de.ibm.com
Thu Sep 7 14:42:58 PDT 2006
Hello Tziporet!
Below is a patch for ehca against the OFED git tree branch ehca-branch, in
order to bring it up to the same code level as Roland's git tree branch
for-2.6.19, which has been posted for a while. The main code changes are:
- Replace the "huge" EDEB macro by a simpler wrapper based on dev_err/dbg
- Remove superfluous variable initialization and argument checking
- Replace struct ehca_module with static variables in the appropriate
files where they are accessed
- Rename module name to ib_ehca.ko
Thanks!
Nam Nguyen
Signed-off-by: Hoang-Nam Nguyen <hnguyen at de.ibm.com>
---
Kconfig | 14
Makefile | 9
ehca_av.c | 128 ++----
ehca_classes.h | 27 -
ehca_cq.c | 222 +++++------
ehca_eq.c | 71 ---
ehca_hca.c | 103 +----
ehca_irq.c | 221 +++--------
ehca_main.c | 491 ++++++++----------------
ehca_mcast.c | 119 +----
ehca_mrmw.c | 1113
++++++++++++++++++++++----------------------------------
ehca_mrmw.h | 3
ehca_pd.c | 60 +--
ehca_qp.c | 572 ++++++++++++----------------
ehca_reqs.c | 219 ++++-------
ehca_sqp.c | 50 --
ehca_tools.h | 337 ++--------------
ehca_uverbs.c | 278 ++++++-------
hcp_if.c | 834 ++++++++++++-----------------------------
hcp_phyp.c | 26 -
hcp_phyp.h | 10
hipz_fns_core.h | 44 --
ipz_pt_fn.c | 37 -
ipz_pt_fn.h | 7
24 files changed, 1781 insertions(+), 3214 deletions(-)
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/Kconfig
linux-2.6/drivers/infiniband/hw/ehca/Kconfig
--- linux-2.6_orig/drivers/infiniband/hw/ehca/Kconfig 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/Kconfig 2006-08-30
20:00:16.000000000 +0200
@@ -1,12 +1,16 @@
config INFINIBAND_EHCA
- tristate "eHCA support"
- depends on IBMEBUS && INFINIBAND
- ---help---
- This is a low level device driver for the IBM GX based Host
channel
- adapters (HCAs).
+ tristate "eHCA support"
+ depends on IBMEBUS && INFINIBAND
+ ---help---
+ This driver supports the IBM pSeries eHCA InfiniBand adapter.
+
+ To compile the driver as a module, choose M here. The module
+ will be called ib_ehca.
config INFINIBAND_EHCA_SCALING
bool "Scaling support (EXPERIMENTAL)"
depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU &&
EXPERIMENTAL
---help---
eHCA scaling support schedules the CQ callbacks to different CPUs.
+
+ To enable this feature choose Y here.
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/Makefile
linux-2.6/drivers/infiniband/hw/ehca/Makefile
--- linux-2.6_orig/drivers/infiniband/hw/ehca/Makefile 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/Makefile 2006-08-30
20:00:17.000000000 +0200
@@ -8,11 +8,10 @@
#
# This source code is distributed under a dual license of GPL v2.0 and
OpenIB BSD.
-obj-$(CONFIG_INFINIBAND_EHCA) += hcad_mod.o
+obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-hcad_mod-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o
ehca_eq.o \
- ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o
ehca_irq.o \
- ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
+ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o
ehca_eq.o \
+ ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o
ehca_irq.o \
+ ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-CFLAGS += -DEHCA_USE_HCALL -DEHCA_USE_HCALL_KERNEL
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_av.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_av.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_av.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_av.c 2006-08-30
20:00:16.000000000 +0200
@@ -42,34 +42,26 @@
*/
-#define DEB_PREFIX "ehav"
-
#include <asm/current.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
+static struct kmem_cache *av_cache;
+
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr
*ah_attr)
{
- extern struct ehca_module ehca_module;
- extern int ehca_static_rate;
- int ret = 0;
- struct ehca_av *av = NULL;
- struct ehca_shca *shca = NULL;
-
- EHCA_CHECK_PD_P(pd);
- EHCA_CHECK_ADR_P(ah_attr);
+ int ret;
+ struct ehca_av *av;
+ struct ehca_shca *shca = container_of(pd->device, struct
ehca_shca,
+ ib_device);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
-
- EDEB_EN(7, "pd=%p ah_attr=%p", pd, ah_attr);
-
- av = kmem_cache_alloc(ehca_module.cache_av, SLAB_KERNEL);
+ av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
if (!av) {
- EDEB_ERR(4, "Out of memory pd=%p ah_attr=%p", pd,
ah_attr);
- ret = -ENOMEM;
- goto create_ah_exit0;
+ ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
+ pd, ah_attr);
+ return ERR_PTR(-ENOMEM);
}
av->av.sl = ah_attr->sl;
@@ -89,10 +81,6 @@ struct ib_ah *ehca_create_ah(struct ib_p
} else
av->av.ipd = ehca_static_rate;
- EDEB(7, "IPD av->av.ipd set =%x ah_attr->static_rate=%x "
- "shca_ib_rate=%x ",av->av.ipd, ah_attr->static_rate,
- shca->sport[ah_attr->port_num].rate);
-
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
@@ -104,7 +92,7 @@ struct ib_ah *ehca_create_ah(struct ib_p
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc = 0;
+ int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
@@ -112,7 +100,7 @@ struct ib_ah *ehca_create_ah(struct ib_p
&port_attr);
if (rc) { /* invalid port number */
ret = -EINVAL;
- EDEB_ERR(4, "Invalid port number "
+ ehca_err(pd->device, "Invalid port number "
"ehca_query_port() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
@@ -123,7 +111,7 @@ struct ib_ah *ehca_create_ah(struct ib_p
ah_attr->grh.sgid_index, &gid);
if (rc) {
ret = -EINVAL;
- EDEB_ERR(4, "Failed to retrieve sgid "
+ ehca_err(pd->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
@@ -137,37 +125,24 @@ struct ib_ah *ehca_create_ah(struct ib_p
memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
- EHCA_REGISTER_AV(device, pd);
-
- EDEB_EX(7, "pd=%p ah_attr=%p av=%p", pd, ah_attr, av);
return &av->ib_ah;
create_ah_exit1:
- kmem_cache_free(ehca_module.cache_av, av);
-
-create_ah_exit0:
- EDEB_EX(7, "ret=%x pd=%p ah_attr=%p", ret, pd, ah_attr);
+ kmem_cache_free(av_cache, av);
return ERR_PTR(ret);
}
int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
- struct ehca_av *av = NULL;
+ struct ehca_av *av;
struct ehca_ud_av new_ehca_av;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
- int ret = 0;
-
- EHCA_CHECK_AV(ah);
- EHCA_CHECK_ADR(ah_attr);
- EDEB_EN(7, "ah=%p ah_attr=%p", ah, ah_attr);
-
- my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
@@ -189,33 +164,31 @@ int ehca_modify_ah(struct ib_ah *ah, str
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
- int rc = 0;
+ int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(ah->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
- ret = -EINVAL;
- EDEB_ERR(4, "Invalid port number "
+ ehca_err(ah->device, "Invalid port number "
"ehca_query_port() returned %x "
"ah=%p ah_attr=%p port_num=%x",
rc, ah, ah_attr, ah_attr->port_num);
- goto modify_ah_exit1;
+ return -EINVAL;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(ah->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
- ret = -EINVAL;
- EDEB_ERR(4, "Failed to retrieve sgid "
+ ehca_err(ah->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"ah=%p ah_attr=%p port_num=%x "
"sgid_index=%x",
rc, ah, ah_attr, ah_attr->port_num,
ah_attr->grh.sgid_index);
- goto modify_ah_exit1;
+ return -EINVAL;
}
memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
}
@@ -228,33 +201,22 @@ int ehca_modify_ah(struct ib_ah *ah, str
av = container_of(ah, struct ehca_av, ib_ah);
av->av = new_ehca_av;
-modify_ah_exit1:
- EDEB_EX(7, "ret=%x ah=%p ah_attr=%p", ret, ah, ah_attr);
-
- return ret;
+ return 0;
}
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
- int ret = 0;
- struct ehca_av *av = NULL;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
- EHCA_CHECK_AV(ah);
- EHCA_CHECK_ADR(ah_attr);
-
- EDEB_EN(7, "ah=%p ah_attr=%p", ah, ah_attr);
-
- my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
- av = container_of(ah, struct ehca_av, ib_ah);
memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
sizeof(ah_attr->grh.dgid));
ah_attr->sl = av->av.sl;
@@ -271,33 +233,39 @@ int ehca_query_ah(struct ib_ah *ah, stru
ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
av->av.grh.word_0);
- EDEB_EX(7, "ah=%p ah_attr=%p ret=%x", ah, ah_attr, ret);
- return ret;
+ return 0;
}
int ehca_destroy_ah(struct ib_ah *ah)
{
- extern struct ehca_module ehca_module;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
- int ret = 0;
-
- EHCA_CHECK_AV(ah);
- EHCA_DEREGISTER_AV(ah);
-
- EDEB_EN(7, "ah=%p", ah);
- my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
- kmem_cache_free(ehca_module.cache_av,
- container_of(ah, struct ehca_av, ib_ah));
+ kmem_cache_free(av_cache, container_of(ah, struct ehca_av,
ib_ah));
- EDEB_EX(7, "ret=%x ah=%p", ret, ah);
- return ret;
+ return 0;
+}
+
+int ehca_init_av_cache(void)
+{
+ av_cache = kmem_cache_create("ehca_cache_av",
+ sizeof(struct ehca_av), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!av_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_av_cache(void)
+{
+ if (av_cache)
+ kmem_cache_destroy(av_cache);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_classes.h
linux-2.6/drivers/infiniband/hw/ehca/ehca_classes.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_classes.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_classes.h 2006-08-30
20:00:16.000000000 +0200
@@ -63,18 +63,6 @@ struct ehca_av;
#include "ehca_irq.h"
-struct ehca_module {
- struct list_head shca_list;
- spinlock_t shca_lock;
- struct timer_list timer;
- kmem_cache_t *cache_pd;
- kmem_cache_t *cache_cq;
- kmem_cache_t *cache_qp;
- kmem_cache_t *cache_av;
- kmem_cache_t *cache_mr;
- kmem_cache_t *cache_mw;
-};
-
struct ehca_eq {
u32 length;
struct ipz_queue ipz_queue;
@@ -274,11 +262,26 @@ int ehca_shca_delete(struct ehca_shca *m
struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
+int ehca_init_pd_cache(void);
+void ehca_cleanup_pd_cache(void);
+int ehca_init_cq_cache(void);
+void ehca_cleanup_cq_cache(void);
+int ehca_init_qp_cache(void);
+void ehca_cleanup_qp_cache(void);
+int ehca_init_av_cache(void);
+void ehca_cleanup_av_cache(void);
+int ehca_init_mrmw_cache(void);
+void ehca_cleanup_mrmw_cache(void);
+
extern spinlock_t ehca_qp_idr_lock;
extern spinlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
+extern int ehca_static_rate;
+extern int ehca_port_act_time;
+extern int ehca_use_hp_mr;
+
struct ipzu_queue_resp {
u64 queue; /* points to first queue entry */
u32 qe_size; /* queue entry size */
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_cq.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_cq.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_cq.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_cq.c 2006-08-30
20:00:17.000000000 +0200
@@ -43,8 +43,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "e_cq"
-
#include <asm/current.h>
#include "ehca_iverbs.h"
@@ -52,17 +50,20 @@
#include "ehca_irq.h"
#include "hcp_if.h"
+static struct kmem_cache *cq_cache;
+
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
unsigned int qp_num = qp->real_qp_num;
unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
- unsigned long spl_flags = 0;
+ unsigned long spl_flags;
spin_lock_irqsave(&cq->spinlock, spl_flags);
hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
spin_unlock_irqrestore(&cq->spinlock, spl_flags);
- EDEB(7, "cq_num=%x real_qp_num=%x", cq->cq_number, qp_num);
+ ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
+ cq->cq_number, qp_num);
return 0;
}
@@ -71,26 +72,27 @@ int ehca_cq_unassign_qp(struct ehca_cq *
{
int ret = -EINVAL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter = NULL;
- struct ehca_qp *qp = NULL;
- unsigned long spl_flags = 0;
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
+ unsigned long spl_flags;
spin_lock_irqsave(&cq->spinlock, spl_flags);
hlist_for_each(iter, &cq->qp_hashtab[key]) {
qp = hlist_entry(iter, struct ehca_qp, list_entries);
if (qp->real_qp_num == real_qp_num) {
hlist_del(iter);
- EDEB(7, "removed qp from cq .cq_num=%x
real_qp_num=%x",
- cq->cq_number, real_qp_num);
+ ehca_dbg(cq->ib_cq.device,
+ "removed qp from cq .cq_num=%x
real_qp_num=%x",
+ cq->cq_number, real_qp_num);
ret = 0;
break;
}
}
spin_unlock_irqrestore(&cq->spinlock, spl_flags);
- if (ret) {
- EDEB_ERR(4, "qp not found cq_num=%x real_qp_num=%x",
+ if (ret)
+ ehca_err(cq->ib_cq.device,
+ "qp not found cq_num=%x real_qp_num=%x",
cq->cq_number, real_qp_num);
- }
return ret;
}
@@ -99,8 +101,8 @@ struct ehca_qp* ehca_cq_get_qp(struct eh
{
struct ehca_qp *ret = NULL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
- struct hlist_node *iter = NULL;
- struct ehca_qp *qp = NULL;
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
hlist_for_each(iter, &cq->qp_hashtab[key]) {
qp = hlist_entry(iter, struct ehca_qp, list_entries);
if (qp->real_qp_num == real_qp_num) {
@@ -115,37 +117,28 @@ struct ib_cq *ehca_create_cq(struct ib_d
struct ib_ucontext *context,
struct ib_udata *udata)
{
- extern struct ehca_module ehca_module;
- struct ib_cq *cq = NULL;
- struct ehca_cq *my_cq = NULL;
- struct ehca_shca *shca = NULL;
+ static const u32 additional_cqe = 20;
+ struct ib_cq *cq;
+ struct ehca_cq *my_cq;
+ struct ehca_shca *shca =
+ container_of(device, struct ehca_shca, ib_device);
struct ipz_adapter_handle adapter_handle;
- /* h_call's out parameters */
- struct ehca_alloc_cq_parms param;
- u32 counter = 0;
- void *vpage = NULL;
- u64 rpage = 0;
+ struct ehca_alloc_cq_parms param; /* h_call's out parameters */
struct h_galpa gal;
- u64 cqx_fec = 0;
- u64 h_ret = 0;
- int ipz_rc = 0;
- int ret = 0;
- const u32 additional_cqe=20;
- int i= 0;
+ void *vpage;
+ u32 counter;
+ u64 rpage, cqx_fec, h_ret;
+ int ipz_rc, ret, i;
unsigned long flags;
- EHCA_CHECK_DEVICE_P(device);
- EDEB_EN(7, "device=%p cqe=%x context=%p", device, cqe, context);
-
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
return ERR_PTR(-EINVAL);
- my_cq = kmem_cache_alloc(ehca_module.cache_cq, SLAB_KERNEL);
+ my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
if (!my_cq) {
- cq = ERR_PTR(-ENOMEM);
- EDEB_ERR(4, "Out of memory for ehca_cq struct device=%p",
+ ehca_err(device, "Out of memory for ehca_cq struct
device=%p",
device);
- goto create_cq_exit0;
+ return ERR_PTR(-ENOMEM);
}
memset(my_cq, 0, sizeof(struct ehca_cq));
@@ -158,17 +151,14 @@ struct ib_cq *ehca_create_cq(struct ib_d
cq = &my_cq->ib_cq;
- shca = container_of(device, struct ehca_shca, ib_device);
adapter_handle = shca->ipz_hca_handle;
param.eq_handle = shca->eq.ipz_eq_handle;
-
do {
if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
cq = ERR_PTR(-ENOMEM);
- EDEB_ERR(4,
- "Can't reserve idr resources. "
- "device=%p", device);
+ ehca_err(device, "Can't reserve idr nr.
device=%p",
+ device);
goto create_cq_exit1;
}
@@ -180,9 +170,8 @@ struct ib_cq *ehca_create_cq(struct ib_d
if (ret) {
cq = ERR_PTR(-ENOMEM);
- EDEB_ERR(4,
- "Can't allocate new idr entry. "
- "device=%p", device);
+ ehca_err(device, "Can't allocate new idr entry.
device=%p",
+ device);
goto create_cq_exit1;
}
@@ -194,7 +183,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, ¶m);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4,"hipz_h_alloc_resource_cq() failed "
+ ehca_err(device, "hipz_h_alloc_resource_cq() failed "
"h_ret=%lx device=%p", h_ret, device);
cq = ERR_PTR(ehca2ib_return_code(h_ret));
goto create_cq_exit2;
@@ -203,9 +192,8 @@ struct ib_cq *ehca_create_cq(struct ib_d
ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
EHCA_PAGESIZE, sizeof(struct ehca_cqe),
0);
if (!ipz_rc) {
- EDEB_ERR(4,
- "ipz_queue_ctor() failed "
- "ipz_rc=%x device=%p", ipz_rc, device);
+ ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x
device=%p",
+ ipz_rc, device);
cq = ERR_PTR(-EINVAL);
goto create_cq_exit3;
}
@@ -213,7 +201,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
for (counter = 0; counter < param.act_pages; counter++) {
vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
if (!vpage) {
- EDEB_ERR(4, "ipz_qpageit_get_inc() "
+ ehca_err(device, "ipz_qpageit_get_inc() "
"returns NULL device=%p", device);
cq = ERR_PTR(-EAGAIN);
goto create_cq_exit4;
@@ -231,10 +219,9 @@ struct ib_cq *ehca_create_cq(struct ib_d
kernel);
if (h_ret < H_SUCCESS) {
- EDEB_ERR(4, "hipz_h_register_rpage_cq() failed "
- "ehca_cq=%p cq_num=%x h_ret=%lx "
- "counter=%i act_pages=%i",
- my_cq, my_cq->cq_number,
+ ehca_err(device, "hipz_h_register_rpage_cq()
failed "
+ "ehca_cq=%p cq_num=%x h_ret=%lx
counter=%i "
+ "act_pages=%i", my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages);
cq = ERR_PTR(-EINVAL);
goto create_cq_exit4;
@@ -243,16 +230,16 @@ struct ib_cq *ehca_create_cq(struct ib_d
if (counter == (param.act_pages - 1)) {
vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
if ((h_ret != H_SUCCESS) || vpage) {
- EDEB_ERR(4, "Registration of pages not "
+ ehca_err(device, "Registration of pages
not "
"complete ehca_cq=%p cq_num=%x "
- "h_ret=%lx",
- my_cq, my_cq->cq_number, h_ret);
+ "h_ret=%lx", my_cq,
my_cq->cq_number,
+ h_ret);
cq = ERR_PTR(-EAGAIN);
goto create_cq_exit4;
}
} else {
if (h_ret != H_PAGE_REGISTERED) {
- EDEB_ERR(4, "Registration of page failed "
+ ehca_err(device, "Registration of page
failed "
"ehca_cq=%p cq_num=%x h_ret=%lx"
"counter=%i act_pages=%i",
my_cq, my_cq->cq_number,
@@ -267,8 +254,8 @@ struct ib_cq *ehca_create_cq(struct ib_d
gal = my_cq->galpas.kernel;
cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
- EDEB(8, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
- my_cq, my_cq->cq_number, cqx_fec);
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+ my_cq, my_cq->cq_number, cqx_fec);
my_cq->ib_cq.cqe = my_cq->nr_of_entries =
param.act_nr_of_entries - additional_cqe;
@@ -280,7 +267,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
if (context) {
struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
struct ehca_create_cq_resp resp;
- struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *vma;
memset(&resp, 0, sizeof(resp));
resp.cq_number = my_cq->cq_number;
resp.token = my_cq->token;
@@ -294,7 +281,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
(void**)&resp.ipz_queue.queue,
&vma);
if (ret) {
- EDEB_ERR(4, "Could not mmap queue pages");
+ ehca_err(device, "Could not mmap queue pages");
cq = ERR_PTR(ret);
goto create_cq_exit4;
}
@@ -304,19 +291,17 @@ struct ib_cq *ehca_create_cq(struct ib_d
(void**)&resp.galpas.kernel.fw_handle,
&vma);
if (ret) {
- EDEB_ERR(4, "Could not mmap fw_handle");
+ ehca_err(device, "Could not mmap fw_handle");
cq = ERR_PTR(ret);
goto create_cq_exit5;
}
my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- EDEB_ERR(4, "Copy to udata failed.");
+ ehca_err(device, "Copy to udata failed.");
goto create_cq_exit6;
}
}
- EDEB_EX(7,"retcode=%p ehca_cq=%p cq_num=%x cq_size=%x",
- cq, my_cq, my_cq->cq_number, param.act_nr_of_entries);
return cq;
create_cq_exit6:
@@ -331,8 +316,8 @@ create_cq_exit4:
create_cq_exit3:
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
if (h_ret != H_SUCCESS)
- EDEB(4, "hipz_h_destroy_cq() failed ehca_cq=%p cq_num=%x "
- "h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
+ ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+ "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number,
h_ret);
create_cq_exit2:
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -340,36 +325,24 @@ create_cq_exit2:
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
create_cq_exit1:
- kmem_cache_free(ehca_module.cache_cq, my_cq);
+ kmem_cache_free(cq_cache, my_cq);
-create_cq_exit0:
- EDEB_EX(4, "An error has occured retcode=%p", cq);
return cq;
}
int ehca_destroy_cq(struct ib_cq *cq)
{
- extern struct ehca_module ehca_module;
- u64 h_ret = 0;
- int ret = 0;
- struct ehca_cq *my_cq = NULL;
- int cq_num = 0;
- struct ib_device *device = NULL;
- struct ehca_shca *shca = NULL;
- struct ipz_adapter_handle adapter_handle;
+ u64 h_ret;
+ int ret;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ int cq_num = my_cq->cq_number;
+ struct ib_device *device = cq->device;
+ struct ehca_shca *shca = container_of(device, struct ehca_shca,
+ ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
u32 cur_pid = current->tgid;
unsigned long flags;
- EHCA_CHECK_CQ(cq);
- my_cq = container_of(cq, struct ehca_cq, ib_cq);
- cq_num = my_cq->cq_number;
- device = cq->device;
- EHCA_CHECK_DEVICE(device);
- shca = container_of(device, struct ehca_shca, ib_device);
- adapter_handle = shca->ipz_hca_handle;
- EDEB_EN(7, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
-
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
while (my_cq->nr_callbacks)
yield();
@@ -378,7 +351,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_cq->ownpid);
return -EINVAL;
}
@@ -386,64 +359,69 @@ int ehca_destroy_cq(struct ib_cq *cq)
/* un-mmap if vma alloc */
if (my_cq->uspace_queue ) {
ret = ehca_munmap(my_cq->uspace_queue,
- my_cq->ipz_queue.queue_length);
+ my_cq->ipz_queue.queue_length);
+ if (ret)
+ ehca_err(device, "Could not munmap queue
ehca_cq=%p "
+ "cq_num=%x", my_cq, cq_num);
ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+ if (ret)
+ ehca_err(device, "Could not munmap fwh ehca_cq=%p
"
+ "cq_num=%x", my_cq, cq_num);
}
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */
- EDEB(4, "ehca_cq=%p cq_num=%x ressource=%lx in err state.
"
- "Try to delete it forcibly.",
- my_cq, my_cq->cq_number,
my_cq->ipz_cq_handle.handle);
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in
err "
+ "state. Try to delete it forcibly.",
+ my_cq, cq_num, my_cq->ipz_cq_handle.handle);
ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
if (h_ret == H_SUCCESS)
- EDEB(4, "ehca_cq=%p cq_num=%x deleted
successfully.",
- my_cq, my_cq->cq_number);
+ ehca_dbg(device, "cq_num=%x deleted
successfully.",
+ cq_num);
}
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4,"hipz_h_destroy_cq() failed "
- "h_ret=%lx ehca_cq=%p cq_num=%x",
- h_ret, my_cq, my_cq->cq_number);
- ret = ehca2ib_return_code(h_ret);
- goto destroy_cq_exit0;
+ ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
+ "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+ return ehca2ib_return_code(h_ret);
}
ipz_queue_dtor(&my_cq->ipz_queue);
- kmem_cache_free(ehca_module.cache_cq, my_cq);
+ kmem_cache_free(cq_cache, my_cq);
-destroy_cq_exit0:
- EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x ",
- my_cq, cq_num, ret);
- return ret;
+ return 0;
}
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
- int ret = 0;
- struct ehca_cq *my_cq = NULL;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
u32 cur_pid = current->tgid;
- if (unlikely(!cq)) {
- EDEB_ERR(4, "cq is NULL");
- return -EFAULT;
- }
-
- my_cq = container_of(cq, struct ehca_cq, ib_cq);
- EDEB_EN(7, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
-
if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_cq->ownpid);
return -EINVAL;
}
/* TODO: proper resize needs to be done */
- ret = -EFAULT;
- EDEB_ERR(4, "not implemented yet");
+ ehca_err(cq->device, "not implemented yet");
- EDEB_EX(7, "ehca_cq=%p cq_num=%x",
- my_cq, my_cq->cq_number);
- return ret;
+ return -EFAULT;
+}
+
+int ehca_init_cq_cache(void)
+{
+ cq_cache = kmem_cache_create("ehca_cache_cq",
+ sizeof(struct ehca_cq), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!cq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_cq_cache(void)
+{
+ if (cq_cache)
+ kmem_cache_destroy(cq_cache);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_eq.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_eq.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_eq.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_eq.c 2006-08-30
20:00:16.000000000 +0200
@@ -43,8 +43,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "e_eq"
-
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
@@ -56,24 +54,21 @@ int ehca_create_eq(struct ehca_shca *shc
struct ehca_eq *eq,
const enum ehca_eq_type type, const u32 length)
{
- u64 ret = H_SUCCESS;
- u32 nr_pages = 0;
+ u64 ret;
+ u32 nr_pages;
u32 i;
- void *vpage = NULL;
-
- EDEB_EN(7, "shca=%p eq=%p length=%x", shca, eq, length);
- EHCA_CHECK_ADR(shca);
- EHCA_CHECK_ADR(eq);
+ void *vpage;
+ struct ib_device *ib_dev = &shca->ib_device;
spin_lock_init(&eq->spinlock);
eq->is_initialized = 0;
if (type != EHCA_EQ && type != EHCA_NEQ) {
- EDEB_ERR(4, "Invalid EQ type %x. eq=%p", type, eq);
+ ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
return -EINVAL;
}
- if (length == 0) {
- EDEB_ERR(4, "EQ length must not be zero. eq=%p", eq);
+ if (!length) {
+ ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
return -EINVAL;
}
@@ -86,14 +81,14 @@ int ehca_create_eq(struct ehca_shca *shc
&nr_pages, &eq->ist);
if (ret != H_SUCCESS) {
- EDEB_ERR(4, "Can't allocate EQ / NEQ. eq=%p", eq);
+ ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
return -EINVAL;
}
ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
if (!ret) {
- EDEB_ERR(4, "Can't allocate EQ pages. eq=%p", eq);
+ ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
goto create_eq_exit1;
}
@@ -130,7 +125,7 @@ int ehca_create_eq(struct ehca_shca *shc
SA_INTERRUPT, "ehca_eq",
(void *)shca);
if (ret < 0)
- EDEB_ERR(4, "Can't map interrupt handler.");
+ ehca_err(ib_dev, "Can't map interrupt handler.");
tasklet_init(&eq->interrupt_task, ehca_tasklet_eq,
(long)shca);
} else if (type == EHCA_NEQ) {
@@ -138,15 +133,13 @@ int ehca_create_eq(struct ehca_shca *shc
SA_INTERRUPT, "ehca_neq",
(void *)shca);
if (ret < 0)
- EDEB_ERR(4, "Can't map interrupt handler.");
+ ehca_err(ib_dev, "Can't map interrupt handler.");
tasklet_init(&eq->interrupt_task, ehca_tasklet_neq,
(long)shca);
}
eq->is_initialized = 1;
- EDEB_EX(7, "ret=%lx", ret);
-
return 0;
create_eq_exit2:
@@ -155,53 +148,25 @@ create_eq_exit2:
create_eq_exit1:
hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
- EDEB_EX(7, "ret=%lx", ret);
-
return -EINVAL;
}
void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
- unsigned long flags = 0;
- void *eqe = NULL;
-
- EDEB_EN(7, "shca=%p eq=%p", shca, eq);
- EHCA_CHECK_ADR_P(shca);
- EHCA_CHECK_EQ_P(eq);
+ unsigned long flags;
+ void *eqe;
spin_lock_irqsave(&eq->spinlock, flags);
eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
- EDEB_EX(7, "eq=%p eqe=%p", eq, eqe);
-
return eqe;
}
-void ehca_poll_eqs(unsigned long data)
-{
- struct ehca_shca *shca;
- struct ehca_module *module = (struct ehca_module*)data;
-
- spin_lock(&module->shca_lock);
- list_for_each_entry(shca, &module->shca_list, shca_list) {
- if (shca->eq.is_initialized)
- ehca_tasklet_eq((unsigned long)(void*)shca);
- }
- mod_timer(&module->timer, jiffies + HZ);
- spin_unlock(&module->shca_lock);
-
- return;
-}
-
int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
- unsigned long flags = 0;
- u64 h_ret = H_SUCCESS;
-
- EDEB_EN(7, "shca=%p eq=%p", shca, eq);
- EHCA_CHECK_ADR(shca);
- EHCA_CHECK_EQ(eq);
+ unsigned long flags;
+ u64 h_ret;
spin_lock_irqsave(&eq->spinlock, flags);
ibmebus_free_irq(NULL, eq->ist, (void *)shca);
@@ -211,12 +176,10 @@ int ehca_destroy_eq(struct ehca_shca *sh
spin_unlock_irqrestore(&eq->spinlock, flags);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "Can't free EQ resources.");
+ ehca_err(&shca->ib_device, "Can't free EQ resources.");
return -EINVAL;
}
ipz_queue_dtor(&eq->ipz_queue);
- EDEB_EX(7, "h_ret=%lx", h_ret);
-
- return h_ret;
+ return 0;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_hca.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_hca.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_hca.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_hca.c 2006-08-30
20:00:16.000000000 +0200
@@ -39,36 +39,29 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#undef DEB_PREFIX
-#define DEB_PREFIX "shca"
-
#include "ehca_tools.h"
-
#include "hcp_if.h"
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr
*props)
{
int ret = 0;
- struct ehca_shca *shca;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
struct hipz_query_hca *rblock;
- EDEB_EN(7, "");
-
- memset(props, 0, sizeof(struct ib_device_attr));
- shca = container_of(ibdev, struct ehca_shca, ib_device);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto query_device0;
+ ehca_err(&shca->ib_device, "Can't allocate rblock
memory.");
+ return -ENOMEM;
}
if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- EDEB_ERR(4, "Can't query device properties");
+ ehca_err(&shca->ib_device, "Can't query device
properties");
ret = -EINVAL;
goto query_device1;
}
+
+ memset(props, 0, sizeof(struct ib_device_attr));
props->fw_ver = rblock->hw_ver;
props->max_mr_size = rblock->max_mr_size;
props->vendor_id = rblock->vendor_id >> 8;
@@ -105,9 +98,6 @@ int ehca_query_device(struct ib_device *
query_device1:
kfree(rblock);
-query_device0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
}
@@ -115,27 +105,23 @@ int ehca_query_port(struct ib_device *ib
u8 port, struct ib_port_attr *props)
{
int ret = 0;
- struct ehca_shca *shca;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
struct hipz_query_port *rblock;
- EDEB_EN(7, "port=%x", port);
-
- memset(props, 0, sizeof(struct ib_port_attr));
- shca = container_of(ibdev, struct ehca_shca, ib_device);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto query_port0;
+ ehca_err(&shca->ib_device, "Can't allocate rblock
memory.");
+ return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) !=
H_SUCCESS) {
- EDEB_ERR(4, "Can't query port properties");
+ ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;
}
+ memset(props, 0, sizeof(struct ib_port_attr));
props->state = rblock->state;
switch (rblock->max_mtu) {
@@ -155,7 +141,9 @@ int ehca_query_port(struct ib_device *ib
props->active_mtu = props->max_mtu = IB_MTU_4096;
break;
default:
- EDEB_ERR(4, "Unknown MTU size: %x.", rblock->max_mtu);
+ ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+ rblock->max_mtu);
+ break;
}
props->gid_tbl_len = rblock->gid_tbl_len;
@@ -176,37 +164,28 @@ int ehca_query_port(struct ib_device *ib
query_port1:
kfree(rblock);
-query_port0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
}
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16
*pkey)
{
int ret = 0;
- struct ehca_shca *shca;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
- EDEB_EN(7, "port=%x index=%x", port, index);
-
if (index > 16) {
- EDEB_ERR(4, "Invalid index: %x.", index);
- ret = -EINVAL;
- goto query_pkey0;
+ ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+ return -EINVAL;
}
- shca = container_of(ibdev, struct ehca_shca, ib_device);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto query_pkey0;
+ ehca_err(&shca->ib_device, "Can't allocate rblock
memory.");
+ return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) !=
H_SUCCESS) {
- EDEB_ERR(4, "Can't query port properties");
+ ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;
}
@@ -216,9 +195,6 @@ int ehca_query_pkey(struct ib_device *ib
query_pkey1:
kfree(rblock);
-query_pkey0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
}
@@ -226,28 +202,23 @@ int ehca_query_gid(struct ib_device *ibd
int index, union ib_gid *gid)
{
int ret = 0;
- struct ehca_shca *shca;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
struct hipz_query_port *rblock;
- EDEB_EN(7, "port=%x index=%x", port, index);
-
if (index > 255) {
- EDEB_ERR(4, "Invalid index: %x.", index);
- ret = -EINVAL;
- goto query_gid0;
+ ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+ return -EINVAL;
}
- shca = container_of(ibdev, struct ehca_shca, ib_device);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto query_gid0;
+ ehca_err(&shca->ib_device, "Can't allocate rblock
memory.");
+ return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) !=
H_SUCCESS) {
- EDEB_ERR(4, "Can't query port properties");
+ ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;
}
@@ -258,11 +229,6 @@ int ehca_query_gid(struct ib_device *ibd
query_gid1:
kfree(rblock);
-query_gid0:
- EDEB_EX(7, "ret=%x GID=%lx%lx", ret,
- *(u64 *) & gid->raw[0],
- *(u64 *) & gid->raw[8]);
-
return ret;
}
@@ -270,13 +236,6 @@ int ehca_modify_port(struct ib_device *i
u8 port, int port_modify_mask,
struct ib_port_modify *props)
{
- int ret = 0;
-
- EDEB_EN(7, "port=%x", port);
-
- /* Not implemented yet. */
-
- EDEB_EX(7, "ret=%x", ret);
-
- return ret;
+ /* Not implemented yet */
+ return -EFAULT;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_irq.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_irq.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_irq.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_irq.c 2006-08-30
20:00:16.000000000 +0200
@@ -39,8 +39,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "eirq"
-
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
@@ -64,15 +62,17 @@
#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
static void queue_comp_task(struct ehca_cq *__cq);
static struct ehca_comp_pool* pool;
static struct notifier_block comp_pool_callback_nb;
+#endif
+
static inline void comp_event_callback(struct ehca_cq *cq)
{
- EDEB_EN(7, "cq=%p", cq);
-
if (!cq->ib_cq.comp_handler)
return;
@@ -80,8 +80,6 @@ static inline void comp_event_callback(s
cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
spin_unlock(&cq->cb_lock);
- EDEB_EX(7, "cq=%p", cq);
-
return;
}
@@ -91,9 +89,6 @@ static void print_error_data(struct ehca
u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
u64 resource = rblock[1];
- EDEB_EN(7, "shca=%p data=%p rblock=%p length=%x",
- shca, data, rblock, length);
-
switch (type) {
case 0x1: /* Queue Pair */
{
@@ -103,7 +98,8 @@ static void print_error_data(struct ehca
if (rblock[6] == 0)
return;
- EDEB_ERR(4, "QP 0x%x (resource=%lx) has errors.",
+ ehca_err(&shca->ib_device,
+ "QP 0x%x (resource=%lx) has errors.",
qp->ib_qp.qp_num, resource);
break;
}
@@ -111,25 +107,25 @@ static void print_error_data(struct ehca
{
struct ehca_cq *cq = (struct ehca_cq*)data;
- EDEB_ERR(4, "CQ 0x%x (resource=%lx) has errors.",
+ ehca_err(&shca->ib_device,
+ "CQ 0x%x (resource=%lx) has errors.",
cq->cq_number, resource);
break;
}
default:
- EDEB_ERR(4, "Unknown errror type: %lx on %s.",
+ ehca_err(&shca->ib_device,
+ "Unknown error type: %lx on %s.",
type, shca->ib_device.name);
break;
}
- EDEB_ERR(4, "Error data is available: %lx.", resource);
- EDEB_ERR(4, "EHCA ----- error data begin "
+ ehca_err(&shca->ib_device, "Error data is available: %lx.",
resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data begin "
"---------------------------------------------------");
- EDEB_DMP(4, rblock, length, "resource=%lx", resource);
- EDEB_ERR(4, "EHCA ----- error data end "
+ ehca_dmp(rblock, length, "resource=%lx", resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data end "
"----------------------------------------------------");
- EDEB_EX(7, "");
-
return;
}
@@ -137,15 +133,13 @@ int ehca_error_data(struct ehca_shca *sh
u64 resource)
{
- unsigned long ret = 0;
+ unsigned long ret;
u64 *rblock;
unsigned long block_count;
- EDEB_EN(7, "shca=%p data=%p resource=%lx", shca, data, resource);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Cannot allocate rblock memory.");
+ ehca_err(&shca->ib_device, "Cannot allocate rblock
memory.");
ret = -ENOMEM;
goto error_data1;
}
@@ -156,7 +150,8 @@ int ehca_error_data(struct ehca_shca *sh
&block_count);
if (ret == H_R_STATE) {
- EDEB_ERR(4, "No error data is available: %lx.", resource);
+ ehca_err(&shca->ib_device,
+ "No error data is available: %lx.", resource);
}
else if (ret == H_SUCCESS) {
int length;
@@ -169,7 +164,8 @@ int ehca_error_data(struct ehca_shca *sh
print_error_data(shca, data, rblock, length);
}
else {
- EDEB_ERR(4, "Error data could not be fetched: %lx",
resource);
+ ehca_err(&shca->ib_device,
+ "Error data could not be fetched: %lx",
resource);
}
kfree(rblock);
@@ -188,8 +184,6 @@ static void qp_event_callback(struct ehc
unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
- EDEB_EN(7, "eqe=%lx", eqe);
-
spin_lock_irqsave(&ehca_qp_idr_lock, flags);
qp = idr_find(&ehca_qp_idr, token);
spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
@@ -209,8 +203,6 @@ static void qp_event_callback(struct ehc
qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
- EDEB_EX(7, "qp=%p", qp);
-
return;
}
@@ -221,8 +213,6 @@ static void cq_event_callback(struct ehc
unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
- EDEB_EN(7, "eqe=%lx", eqe);
-
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
cq = idr_find(&ehca_cq_idr, token);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -232,8 +222,6 @@ static void cq_event_callback(struct ehc
ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
- EDEB_EX(7, "cq=%p", cq);
-
return;
}
@@ -241,8 +229,6 @@ static void parse_identifier(struct ehca
{
u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
- EDEB_EN(7, "shca=%p eqe=%lx", shca, eqe);
-
switch (identifier) {
case 0x02: /* path migrated */
qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
@@ -262,41 +248,39 @@ static void parse_identifier(struct ehca
cq_event_callback(shca, eqe);
break;
case 0x09: /* MRMWPTE error */
- EDEB_ERR(4, "MRMWPTE error.");
+ ehca_err(&shca->ib_device, "MRMWPTE error.");
break;
case 0x0A: /* port event */
- EDEB_ERR(4, "Port event.");
+ ehca_err(&shca->ib_device, "Port event.");
break;
case 0x0B: /* MR access error */
- EDEB_ERR(4, "MR access error.");
+ ehca_err(&shca->ib_device, "MR access error.");
break;
case 0x0C: /* EQ error */
- EDEB_ERR(4, "EQ error.");
+ ehca_err(&shca->ib_device, "EQ error.");
break;
case 0x0D: /* P/Q_Key mismatch */
- EDEB_ERR(4, "P/Q_Key mismatch.");
+ ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
break;
case 0x10: /* sampling complete */
- EDEB_ERR(4, "Sampling complete.");
+ ehca_err(&shca->ib_device, "Sampling complete.");
break;
case 0x11: /* unaffiliated access error */
- EDEB_ERR(4, "Unaffiliated access error.");
+ ehca_err(&shca->ib_device, "Unaffiliated access error.");
break;
case 0x12: /* path migrating error */
- EDEB_ERR(4, "Path migration error.");
+ ehca_err(&shca->ib_device, "Path migration error.");
break;
case 0x13: /* interface trace stopped */
- EDEB_ERR(4, "Interface trace stopped.");
+ ehca_err(&shca->ib_device, "Interface trace stopped.");
break;
case 0x14: /* first error capture info available */
default:
- EDEB_ERR(4, "Unknown identifier: %x on %s.",
+ ehca_err(&shca->ib_device, "Unknown identifier: %x on
%s.",
identifier, shca->ib_device.name);
break;
}
- EDEB_EX(7, "eqe=%lx identifier=%x", eqe, identifier);
-
return;
}
@@ -306,21 +290,19 @@ static void parse_ec(struct ehca_shca *s
u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
- EDEB_EN(7, "shca=%p eqe=%lx", shca, eqe);
-
switch (ec) {
case 0x30: /* port availability change */
if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
- EDEB(4, "%s: port %x is active.",
- shca->ib_device.name, port);
+ ehca_info(&shca->ib_device,
+ "port %x is active.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ACTIVE;
event.element.port_num = port;
shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
ib_dispatch_event(&event);
} else {
- EDEB(4, "%s: port %x is inactive.",
- shca->ib_device.name, port);
+ ehca_info(&shca->ib_device,
+ "port %x is inactive.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port;
@@ -333,19 +315,19 @@ static void parse_ec(struct ehca_shca *s
* disruptive change is caused by
* LID, PKEY or SM change
*/
- EDEB(4, "EHCA disruptive port %x "
- "configuration change.", port);
+ ehca_warn(&shca->ib_device,
+ "disruptive port %x configuration change",
port);
- EDEB(4, "%s: port %x is inactive.",
- shca->ib_device.name, port);
+ ehca_info(&shca->ib_device,
+ "port %x is inactive.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port;
shca->sport[port - 1].port_state = IB_PORT_DOWN;
ib_dispatch_event(&event);
- EDEB(4, "%s: port %x is active.",
- shca->ib_device.name, port);
+ ehca_info(&shca->ib_device,
+ "port %x is active.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ACTIVE;
event.element.port_num = port;
@@ -353,34 +335,27 @@ static void parse_ec(struct ehca_shca *s
ib_dispatch_event(&event);
break;
case 0x32: /* adapter malfunction */
- EDEB_ERR(4, "Adapter malfunction.");
+ ehca_err(&shca->ib_device, "Adapter malfunction.");
break;
case 0x33: /* trace stopped */
- EDEB_ERR(4, "Traced stopped.");
+ ehca_err(&shca->ib_device, "Trace stopped.");
break;
default:
- EDEB_ERR(4, "Unknown event code: %x on %s.",
+ ehca_err(&shca->ib_device, "Unknown event code: %x on
%s.",
ec, shca->ib_device.name);
break;
}
- EDEB_EN(7, "eqe=%lx ec=%x", eqe, ec);
-
return;
}
static inline void reset_eq_pending(struct ehca_cq *cq)
{
- u64 CQx_EP = 0;
+ u64 CQx_EP;
struct h_galpa gal = cq->galpas.kernel;
- EDEB_EN(7, "cq=%p", cq);
-
hipz_galpa_store_cq(gal, cqx_ep, 0x0);
CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
- EDEB(7, "CQx_EP=%lx", CQx_EP);
-
- EDEB_EX(7, "cq=%p", cq);
return;
}
@@ -389,12 +364,8 @@ irqreturn_t ehca_interrupt_neq(int irq,
{
struct ehca_shca *shca = (struct ehca_shca*)dev_id;
- EDEB_EN(7, "dev_id=%p", dev_id);
-
tasklet_hi_schedule(&shca->neq.interrupt_task);
- EDEB_EX(7, "");
-
return IRQ_HANDLED;
}
@@ -402,9 +373,7 @@ void ehca_tasklet_neq(unsigned long data
{
struct ehca_shca *shca = (struct ehca_shca*)data;
struct ehca_eqe *eqe;
- u64 ret = H_SUCCESS;
-
- EDEB_EN(7, "shca=%p", shca);
+ u64 ret;
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
@@ -419,9 +388,7 @@ void ehca_tasklet_neq(unsigned long data
shca->neq.ipz_eq_handle,
0xFFFFFFFFFFFFFFFFL);
if (ret != H_SUCCESS)
- EDEB_ERR(4, "Can't clear notification events.");
-
- EDEB_EX(7, "shca=%p", shca);
+ ehca_err(&shca->ib_device, "Can't clear notification
events.");
return;
}
@@ -430,12 +397,8 @@ irqreturn_t ehca_interrupt_eq(int irq, v
{
struct ehca_shca *shca = (struct ehca_shca*)dev_id;
- EDEB_EN(7, "dev_id=%p", dev_id);
-
tasklet_hi_schedule(&shca->eq.interrupt_task);
- EDEB_EX(7, "");
-
return IRQ_HANDLED;
}
@@ -446,8 +409,6 @@ void ehca_tasklet_eq(unsigned long data)
int int_state;
int query_cnt = 0;
- EDEB_EN(7, "shca=%p", shca);
-
do {
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
@@ -460,17 +421,18 @@ void ehca_tasklet_eq(unsigned long data)
while (eqe) {
u64 eqe_value = eqe->entry;
- EDEB(7, "eqe_value=%lx", eqe_value);
+ ehca_dbg(&shca->ib_device,
+ "eqe_value=%lx", eqe_value);
/* TODO: better structure */
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
eqe_value)) {
- extern struct idr ehca_cq_idr;
unsigned long flags;
u32 token;
struct ehca_cq *cq;
- EDEB(6, "... completion event");
+ ehca_dbg(&shca->ib_device,
+ "... completion event");
token =
EHCA_BMASK_GET(EQE_CQ_TOKEN,
eqe_value);
@@ -494,7 +456,8 @@ void ehca_tasklet_eq(unsigned long data)
comp_event_callback(cq);
#endif
} else {
- EDEB(6, "... non completion
event");
+ ehca_dbg(&shca->ib_device,
+ "... non completion
event");
parse_identifier(shca, eqe_value);
}
eqe =
@@ -518,29 +481,25 @@ void ehca_tasklet_eq(unsigned long data)
}
} while (int_state != 0);
- EDEB_EX(7, "shca=%p", shca);
-
return;
}
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
{
unsigned long flags_last_cpu;
- EDEB_DMP(7, &cpu_online_map, sizeof(cpumask_t), "");
+ if (ehca_debug_level)
+ ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-
if (pool->last_cpu == NR_CPUS)
- pool->last_cpu = 0;
- if (!cpu_online(pool->last_cpu))
- pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-
+ pool->last_cpu = first_cpu(cpu_online_map);
spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
- // return pool->last_cpu;
- return 1;
+ return pool->last_cpu;
}
static void __queue_comp_task(struct ehca_cq *__cq,
@@ -549,8 +508,6 @@ static void __queue_comp_task(struct ehc
unsigned long flags_cct;
unsigned long flags_cq;
- EDEB_EN(7, "__cq=%p cct=%p", __cq, cct);
-
spin_lock_irqsave(&cct->task_lock, flags_cct);
spin_lock_irqsave(&__cq->task_lock, flags_cq);
@@ -565,10 +522,6 @@ static void __queue_comp_task(struct ehc
spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
-
-
- EDEB_EX(7, "");
-
}
static void queue_comp_task(struct ehca_cq *__cq)
@@ -580,10 +533,6 @@ static void queue_comp_task(struct ehca_
cpu = get_cpu();
cpu_id = find_next_online_cpu(pool);
- EDEB_EN(7, "pool=%p cq=%p cq_nr=%x CPU=%x:%x:%x:%x",
- pool, __cq, __cq->cq_number,
- cpu, cpu_id, num_online_cpus(), num_possible_cpus());
-
BUG_ON(!cpu_online(cpu_id));
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
@@ -597,20 +546,15 @@ static void queue_comp_task(struct ehca_
put_cpu();
- EDEB_EX(7, "cct=%p", cct);
-
return;
}
static void run_comp_task(struct ehca_cpu_comp_task* cct)
{
- struct ehca_cq *cq = NULL;
+ struct ehca_cq *cq;
unsigned long flags_cct;
unsigned long flags_cq;
-
- EDEB_EN(7, "cct=%p", cct);
-
spin_lock_irqsave(&cct->task_lock, flags_cct);
while (!list_empty(&cct->cq_list)) {
@@ -631,8 +575,6 @@ static void run_comp_task(struct ehca_cp
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
- EDEB_EX(7, "cct=%p cq=%p", cct, cq);
-
return;
}
@@ -641,8 +583,6 @@ static int comp_task(void *__cct)
struct ehca_cpu_comp_task* cct = __cct;
DECLARE_WAITQUEUE(wait, current);
- EDEB_EN(7, "cct=%p", cct);
-
set_current_state(TASK_INTERRUPTIBLE);
while(!kthread_should_stop()) {
add_wait_queue(&cct->wait_queue, &wait);
@@ -661,8 +601,6 @@ static int comp_task(void *__cct)
}
__set_current_state(TASK_RUNNING);
- EDEB_EX(7, "");
-
return 0;
}
@@ -671,16 +609,12 @@ static struct task_struct *create_comp_t
{
struct ehca_cpu_comp_task *cct;
- EDEB_EN(7, "cpu=%d:%d", cpu, NR_CPUS);
-
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
spin_lock_init(&cct->task_lock);
INIT_LIST_HEAD(&cct->cq_list);
init_waitqueue_head(&cct->wait_queue);
cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
- EDEB_EX(7, "cct/%d=%p", cpu, cct);
-
return cct->task;
}
@@ -691,8 +625,6 @@ static void destroy_comp_task(struct ehc
struct task_struct *task;
unsigned long flags_cct;
- EDEB_EN(7, "pool=%p cpu=%d:%d", pool, cpu, NR_CPUS);
-
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
spin_lock_irqsave(&cct->task_lock, flags_cct);
@@ -706,8 +638,6 @@ static void destroy_comp_task(struct ehc
if (task)
kthread_stop(task);
- EDEB_EX(7, "");
-
return;
}
@@ -719,8 +649,6 @@ static void take_over_work(struct ehca_c
struct ehca_cq *cq;
unsigned long flags_cct;
- EDEB_EN(7, "cpu=%x", cpu);
-
spin_lock_irqsave(&cct->task_lock, flags_cct);
list_splice_init(&cct->cq_list, &list);
@@ -735,8 +663,6 @@ static void take_over_work(struct ehca_c
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
- EDEB_EX(7, "");
-
}
static int comp_pool_callback(struct notifier_block *nfb,
@@ -746,55 +672,50 @@ static int comp_pool_callback(struct not
unsigned int cpu = (unsigned long)hcpu;
struct ehca_cpu_comp_task *cct;
- EDEB_EN(7, "CPU number changed (action=%lx)", action);
-
switch (action) {
case CPU_UP_PREPARE:
- EDEB(4, "CPU: %x (CPU_PREPARE)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if(!create_comp_task(pool, cpu)) {
- EDEB_ERR(4, "Can't create comp_task for cpu: %x",
cpu);
+ ehca_gen_err("Can't create comp_task for cpu: %x",
cpu);
return NOTIFY_BAD;
}
break;
case CPU_UP_CANCELED:
- EDEB(4, "CPU: %x (CPU_CANCELED)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
kthread_bind(cct->task, any_online_cpu(cpu_online_map));
destroy_comp_task(pool, cpu);
break;
case CPU_ONLINE:
- EDEB(4, "CPU: %x (CPU_ONLINE)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
kthread_bind(cct->task, cpu);
wake_up_process(cct->task);
break;
case CPU_DOWN_PREPARE:
- EDEB(4, "CPU: %x (CPU_DOWN_PREPARE)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
break;
case CPU_DOWN_FAILED:
- EDEB(4, "CPU: %x (CPU_DOWN_FAILED)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
break;
case CPU_DEAD:
- EDEB(4, "CPU: %x (CPU_DEAD)", cpu);
+ ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
destroy_comp_task(pool, cpu);
take_over_work(pool, cpu);
break;
}
- EDEB_EX(7, "CPU number changed");
-
return NOTIFY_OK;
}
+#endif
+
int ehca_create_comp_pool(void)
{
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
int cpu;
struct task_struct *task;
- EDEB_EN(7, "");
-
-
pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
if (pool == NULL)
return -ENOMEM;
@@ -819,8 +740,6 @@ int ehca_create_comp_pool(void)
comp_pool_callback_nb.notifier_call = comp_pool_callback;
comp_pool_callback_nb.priority =0;
register_cpu_notifier(&comp_pool_callback_nb);
-
- EDEB_EX(7, "pool=%p", pool);
#endif
return 0;
@@ -831,16 +750,12 @@ void ehca_destroy_comp_pool(void)
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
int i;
- EDEB_EN(7, "pool=%p", pool);
-
unregister_cpu_notifier(&comp_pool_callback_nb);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
destroy_comp_task(pool, i);
}
-
- EDEB_EN(7, "");
#endif
return;
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_main.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_main.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_main.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_main.c 2006-08-30
20:00:17.000000000 +0200
@@ -4,6 +4,7 @@
* module start stop, hca detection
*
* Authors: Heiko J Schick <schickhj at de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen at de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
@@ -38,8 +39,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "shca"
-
#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
@@ -49,10 +48,10 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch at de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0012");
+MODULE_VERSION("SVNEHCA_0015");
int ehca_open_aqp1 = 0;
-int ehca_debug_level = -1;
+int ehca_debug_level = 0;
int ehca_hw_level = 0;
int ehca_nr_ports = 2;
int ehca_use_hp_mr = 0;
@@ -73,7 +72,7 @@ MODULE_PARM_DESC(open_aqp1,
"AQP1 on startup (0: no (default), 1: yes)");
MODULE_PARM_DESC(debug_level,
"debug level"
- " (0: node, 6: only errors (default), 9: all)");
+ " (0: no debug traces (default), 1: with debug traces)");
MODULE_PARM_DESC(hw_level,
"hardware level"
" (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
@@ -89,170 +88,74 @@ MODULE_PARM_DESC(poll_all_eqs,
MODULE_PARM_DESC(static_rate,
"set permanent static rate (default: disabled)");
-/*
- * This external trace mask controls what will end up in the
- * kernel ring buffer. Number 6 means, that everything between
- * 0 and 5 will be stored.
- */
-u8 ehca_edeb_mask[EHCA_EDEB_TRACE_MASK_SIZE]={6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 6, 6,
- 6, 6, 0, 0};
-
spinlock_t ehca_qp_idr_lock;
spinlock_t ehca_cq_idr_lock;
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
-struct ehca_module ehca_module;
-
-void ehca_init_trace(void)
-{
- EDEB_EN(7, "");
+static struct list_head shca_list; /* list of all registered ehcas */
+static spinlock_t shca_list_lock;
- if (ehca_debug_level != -1) {
- int i;
- for (i = 0; i < EHCA_EDEB_TRACE_MASK_SIZE; i++)
- ehca_edeb_mask[i] = ehca_debug_level;
- }
-
- EDEB_EX(7, "");
-}
+static struct timer_list poll_eqs_timer;
-int ehca_create_slab_caches(struct ehca_module *ehca_module)
+static int ehca_create_slab_caches(void)
{
- int ret = 0;
-
- EDEB_EN(7, "");
+ int ret;
- ehca_module->cache_pd =
- kmem_cache_create("ehca_cache_pd",
- sizeof(struct ehca_pd),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_pd) {
- EDEB_ERR(4, "Cannot create PD SLAB cache.");
- ret = -ENOMEM;
- goto create_slab_caches1;
+ ret = ehca_init_pd_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create PD SLAB cache.");
+ return ret;
}
- ehca_module->cache_cq =
- kmem_cache_create("ehca_cache_cq",
- sizeof(struct ehca_cq),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_cq) {
- EDEB_ERR(4, "Cannot create CQ SLAB cache.");
- ret = -ENOMEM;
+ ret = ehca_init_cq_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create CQ SLAB cache.");
goto create_slab_caches2;
}
- ehca_module->cache_qp =
- kmem_cache_create("ehca_cache_qp",
- sizeof(struct ehca_qp),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_qp) {
- EDEB_ERR(4, "Cannot create QP SLAB cache.");
- ret = -ENOMEM;
+ ret = ehca_init_qp_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create QP SLAB cache.");
goto create_slab_caches3;
}
- ehca_module->cache_av =
- kmem_cache_create("ehca_cache_av",
- sizeof(struct ehca_av),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_av) {
- EDEB_ERR(4, "Cannot create AV SLAB cache.");
- ret = -ENOMEM;
+ ret = ehca_init_av_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create AV SLAB cache.");
goto create_slab_caches4;
}
- ehca_module->cache_mw =
- kmem_cache_create("ehca_cache_mw",
- sizeof(struct ehca_mw),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_mw) {
- EDEB_ERR(4, "Cannot create MW SLAB cache.");
- ret = -ENOMEM;
+ ret = ehca_init_mrmw_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create MR&MW SLAB cache.");
goto create_slab_caches5;
}
- ehca_module->cache_mr =
- kmem_cache_create("ehca_cache_mr",
- sizeof(struct ehca_mr),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!ehca_module->cache_mr) {
- EDEB_ERR(4, "Cannot create MR SLAB cache.");
- ret = -ENOMEM;
- goto create_slab_caches6;
- }
-
- EDEB_EX(7, "ret=%x", ret);
-
- return ret;
-
-create_slab_caches6:
- kmem_cache_destroy(ehca_module->cache_mw);
+ return 0;
create_slab_caches5:
- kmem_cache_destroy(ehca_module->cache_av);
+ ehca_cleanup_av_cache();
create_slab_caches4:
- kmem_cache_destroy(ehca_module->cache_qp);
+ ehca_cleanup_qp_cache();
create_slab_caches3:
- kmem_cache_destroy(ehca_module->cache_cq);
+ ehca_cleanup_cq_cache();
create_slab_caches2:
- kmem_cache_destroy(ehca_module->cache_pd);
-
-create_slab_caches1:
- EDEB_EX(7, "ret=%x", ret);
+ ehca_cleanup_pd_cache();
return ret;
}
-int ehca_destroy_slab_caches(struct ehca_module *ehca_module)
+static void ehca_destroy_slab_caches(void)
{
- int ret;
-
- EDEB_EN(7, "");
-
- ret = kmem_cache_destroy(ehca_module->cache_pd);
- if (ret)
- EDEB_ERR(4, "Cannot destroy PD SLAB cache. ret=%x", ret);
-
- ret = kmem_cache_destroy(ehca_module->cache_cq);
- if (ret)
- EDEB_ERR(4, "Cannot destroy CQ SLAB cache. ret=%x", ret);
-
- ret = kmem_cache_destroy(ehca_module->cache_qp);
- if (ret)
- EDEB_ERR(4, "Cannot destroy QP SLAB cache. ret=%x", ret);
-
- ret = kmem_cache_destroy(ehca_module->cache_av);
- if (ret)
- EDEB_ERR(4, "Cannot destroy AV SLAB cache. ret=%x", ret);
-
- ret = kmem_cache_destroy(ehca_module->cache_mw);
- if (ret)
- EDEB_ERR(4, "Cannot destroy MW SLAB cache. ret=%x", ret);
-
- ret = kmem_cache_destroy(ehca_module->cache_mr);
- if (ret)
- EDEB_ERR(4, "Cannot destroy MR SLAB cache. ret=%x", ret);
-
- EDEB_EX(7, "");
-
- return 0;
+ ehca_cleanup_mrmw_cache();
+ ehca_cleanup_av_cache();
+ ehca_cleanup_qp_cache();
+ ehca_cleanup_cq_cache();
+ ehca_cleanup_pd_cache();
}
#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
@@ -260,22 +163,20 @@ int ehca_destroy_slab_caches(struct ehca
int ehca_sense_attributes(struct ehca_shca *shca)
{
- int ret = -EINVAL;
- u64 h_ret = H_SUCCESS;
+ int ret = 0;
+ u64 h_ret;
struct hipz_query_hca *rblock;
- EDEB_EN(7, "shca=%p", shca);
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Cannot allocate rblock memory.");
- ret = -ENOMEM;
- goto num_ports0;
+ ehca_gen_err("Cannot allocate rblock memory.");
+ return -ENOMEM;
}
h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "Cannot query device properties. h_ret=%lx",
h_ret);
+ ehca_gen_err("Cannot query device properties. h_ret=%lx",
+ h_ret);
ret = -EPERM;
goto num_ports1;
}
@@ -285,7 +186,7 @@ int ehca_sense_attributes(struct ehca_sh
else
shca->num_ports = (u8)rblock->num_ports;
- EDEB(6, " ... found %x ports", rblock->num_ports);
+ ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
if (ehca_hw_level == 0) {
u32 hcaaver;
@@ -294,8 +195,7 @@ int ehca_sense_attributes(struct ehca_sh
hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
- EDEB(6, " ... hardware version=%x:%x",
- hcaaver, revid);
+ ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver,
revid);
if ((hcaaver == 1) && (revid == 0))
shca->hw_level = 0;
@@ -304,58 +204,43 @@ int ehca_sense_attributes(struct ehca_sh
else if ((hcaaver == 1) && (revid == 2))
shca->hw_level = 2;
}
- EDEB(6, " ... hardware level=%x", shca->hw_level);
+ ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
shca->sport[0].rate = IB_RATE_30_GBPS;
shca->sport[1].rate = IB_RATE_30_GBPS;
- ret = 0;
-
num_ports1:
kfree(rblock);
-
-num_ports0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
}
-static int init_node_guid(struct ehca_shca* shca)
+static int init_node_guid(struct ehca_shca *shca)
{
int ret = 0;
struct hipz_query_hca *rblock;
- EDEB_EN(7, "");
-
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!rblock) {
- EDEB_ERR(4, "Can't allocate rblock memory.");
- ret = -ENOMEM;
- goto init_node_guid0;
+ ehca_err(&shca->ib_device, "Can't allocate rblock
memory.");
+ return -ENOMEM;
}
if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
- EDEB_ERR(4, "Can't query device properties");
+ ehca_err(&shca->ib_device, "Can't query device
properties");
ret = -EINVAL;
goto init_node_guid1;
}
- memcpy(&shca->ib_device.node_guid, &rblock->node_guid,
(sizeof(u64)));
+ memcpy(&shca->ib_device.node_guid, &rblock->node_guid,
sizeof(u64));
init_node_guid1:
kfree(rblock);
-
-init_node_guid0:
- EDEB_EX(7, "node_guid=%lx ret=%x", shca->ib_device.node_guid,
ret);
-
return ret;
}
int ehca_register_device(struct ehca_shca *shca)
{
- int ret = 0;
-
- EDEB_EN(7, "shca=%p", shca);
+ int ret;
ret = init_node_guid(shca);
if (ret)
@@ -383,7 +268,7 @@ int ehca_register_device(struct ehca_shc
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
- shca->ib_device.node_type = IB_NODE_CA;
+ shca->ib_device.node_type = RDMA_NODE_IB_CA;
shca->ib_device.phys_port_cnt = shca->num_ports;
shca->ib_device.dma_device =
&shca->ibmebus_dev->ofdev.dev;
shca->ib_device.query_device = ehca_query_device;
@@ -432,38 +317,35 @@ int ehca_register_device(struct ehca_shc
shca->ib_device.mmap = ehca_mmap;
ret = ib_register_device(&shca->ib_device);
-
- EDEB_EX(7, "ret=%x", ret);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "ib_register_device() failed ret=%x", ret);
return ret;
}
static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
{
- struct ehca_sport *sport;
+ struct ehca_sport *sport = &shca->sport[port - 1];
struct ib_cq *ibcq;
struct ib_qp *ibqp;
struct ib_qp_init_attr qp_init_attr;
- int ret = 0;
-
- EDEB_EN(7, "shca=%p port=%x", shca, port);
-
- sport = &shca->sport[port - 1];
+ int ret;
if (sport->ibcq_aqp1) {
- EDEB_ERR(4, "AQP1 CQ is already created.");
+ ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
return -EPERM;
}
ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1),
10);
if (IS_ERR(ibcq)) {
- EDEB_ERR(4, "Cannot create AQP1 CQ.");
+ ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
return PTR_ERR(ibcq);
}
sport->ibcq_aqp1 = ibcq;
if (sport->ibqp_aqp1) {
- EDEB_ERR(4, "AQP1 QP is already created.");
+ ehca_err(&shca->ib_device, "AQP1 QP is already created.");
ret = -EPERM;
goto create_aqp1;
}
@@ -484,84 +366,62 @@ static int ehca_create_aqp1(struct ehca_
ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
if (IS_ERR(ibqp)) {
- EDEB_ERR(4, "Cannot create AQP1 QP.");
+ ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
ret = PTR_ERR(ibqp);
goto create_aqp1;
}
sport->ibqp_aqp1 = ibqp;
- goto create_aqp0;
+ return 0;
create_aqp1:
ib_destroy_cq(sport->ibcq_aqp1);
-
-create_aqp0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
}
static int ehca_destroy_aqp1(struct ehca_sport *sport)
{
- int ret = 0;
-
- EDEB_EN(7, "sport=%p", sport);
+ int ret;
ret = ib_destroy_qp(sport->ibqp_aqp1);
if (ret) {
- EDEB_ERR(4, "Cannot destroy AQP1 QP. ret=%x", ret);
- goto destroy_aqp1;
+ ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
+ return ret;
}
ret = ib_destroy_cq(sport->ibcq_aqp1);
if (ret)
- EDEB_ERR(4, "Cannot destroy AQP1 CQ. ret=%x", ret);
-
-destroy_aqp1:
- EDEB_EX(7, "ret=%x", ret);
+ ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);
return ret;
}
-static ssize_t ehca_show_debug_mask(struct device_driver *ddp, char *buf)
+static ssize_t ehca_show_debug_level(struct device_driver *ddp, char
*buf)
{
- int i;
- int total = 0;
- total += snprintf(buf + total, PAGE_SIZE - total, "%d",
- ehca_edeb_mask[0]);
- for (i = 1; i < EHCA_EDEB_TRACE_MASK_SIZE; i++) {
- total += snprintf(buf + total, PAGE_SIZE - total, "%d",
- ehca_edeb_mask[i]);
- }
-
- total += snprintf(buf + total, PAGE_SIZE - total, "\n");
-
- return total;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ehca_debug_level);
}
-static ssize_t ehca_store_debug_mask(struct device_driver *ddp,
- const char *buf, size_t count)
+static ssize_t ehca_store_debug_level(struct device_driver *ddp,
+ const char *buf, size_t count)
{
- int i;
- for (i = 0; i < EHCA_EDEB_TRACE_MASK_SIZE; i++) {
- char value = buf[i] - '0';
- if ((value <= 9) && (count >= i)) {
- ehca_edeb_mask[i] = value;
- }
- }
- return count;
+ int value = (*buf) - '0';
+ if (value >= 0 && value <= 9)
+ ehca_debug_level = value;
+ return 1;
}
-DRIVER_ATTR(debug_mask, S_IRUSR | S_IWUSR,
- ehca_show_debug_mask, ehca_store_debug_mask);
+
+DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+ ehca_show_debug_level, ehca_store_debug_level);
void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
{
- driver_create_file(&drv->driver, &driver_attr_debug_mask);
+ driver_create_file(&drv->driver, &driver_attr_debug_level);
}
void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
{
- driver_remove_file(&drv->driver, &driver_attr_debug_mask);
+ driver_remove_file(&drv->driver, &driver_attr_debug_level);
}
#define EHCA_RESOURCE_ATTR(name) \
@@ -577,14 +437,14 @@ static ssize_t ehca_show_##name(struct
\
rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \
if (!rblock) { \
- EDEB_ERR(4, "Can't allocate rblock memory."); \
+ dev_err(dev, "Can't allocate rblock memory."); \
return 0; \
} \
\
if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
\
- EDEB_ERR(4, "Can't query device properties"); \
- kfree(rblock); \
- return 0; \
+ dev_err(dev, "Can't query device properties"); \
+ kfree(rblock); \
+ return 0; \
} \
\
data = rblock->name; \
@@ -669,26 +529,24 @@ static int __devinit ehca_probe(struct i
struct ehca_shca *shca;
u64 *handle;
struct ib_pd *ibpd;
- int ret = 0;
-
- EDEB_EN(7, "");
+ int ret;
handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle",
NULL);
if (!handle) {
- EDEB_ERR(4, "Cannot get eHCA handle for adapter: %s.",
- dev->ofdev.node->full_name);
+ ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+ dev->ofdev.node->full_name);
return -ENODEV;
}
if (!(*handle)) {
- EDEB_ERR(4, "Wrong eHCA handle for adapter: %s.",
- dev->ofdev.node->full_name);
+ ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+ dev->ofdev.node->full_name);
return -ENODEV;
}
shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
- if (shca == NULL) {
- EDEB_ERR(4, "Cannot allocate shca memory.");
+ if (!shca) {
+ ehca_gen_err("Cannot allocate shca memory.");
return -ENOMEM;
}
@@ -698,29 +556,35 @@ static int __devinit ehca_probe(struct i
ret = ehca_sense_attributes(shca);
if (ret < 0) {
- EDEB_ERR(4, "Cannot sense eHCA attributes.");
+ ehca_gen_err("Cannot sense eHCA attributes.");
+ goto probe1;
+ }
+
+ ret = ehca_register_device(shca);
+ if (ret) {
+ ehca_gen_err("Cannot register Infiniband device");
goto probe1;
}
/* create event queues */
ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
if (ret) {
- EDEB_ERR(4, "Cannot create EQ.");
- goto probe1;
+ ehca_err(&shca->ib_device, "Cannot create EQ.");
+ goto probe2;
}
ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
if (ret) {
- EDEB_ERR(4, "Cannot create NEQ.");
- goto probe2;
+ ehca_err(&shca->ib_device, "Cannot create NEQ.");
+ goto probe3;
}
/* create internal protection domain */
ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
if (IS_ERR(ibpd)) {
- EDEB_ERR(4, "Cannot create internal PD.");
+ ehca_err(&shca->ib_device, "Cannot create internal PD.");
ret = PTR_ERR(ibpd);
- goto probe3;
+ goto probe4;
}
shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
@@ -730,13 +594,8 @@ static int __devinit ehca_probe(struct i
ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
if (ret) {
- EDEB_ERR(4, "Cannot create internal MR. ret=%x", ret);
- goto probe4;
- }
-
- ret = ehca_register_device(shca);
- if (ret) {
- EDEB_ERR(4, "Cannot register Infiniband device.");
+ ehca_err(&shca->ib_device, "Cannot create internal MR
ret=%x",
+ ret);
goto probe5;
}
@@ -745,7 +604,8 @@ static int __devinit ehca_probe(struct i
shca->sport[0].port_state = IB_PORT_DOWN;
ret = ehca_create_aqp1(shca, 1);
if (ret) {
- EDEB_ERR(4, "Cannot create AQP1 for port 1.");
+ ehca_err(&shca->ib_device,
+ "Cannot create AQP1 for port 1.");
goto probe6;
}
}
@@ -755,54 +615,56 @@ static int __devinit ehca_probe(struct i
shca->sport[1].port_state = IB_PORT_DOWN;
ret = ehca_create_aqp1(shca, 2);
if (ret) {
- EDEB_ERR(4, "Cannot create AQP1 for port 2.");
+ ehca_err(&shca->ib_device,
+ "Cannot create AQP1 for port 2.");
goto probe7;
}
}
ehca_create_device_sysfs(dev);
- spin_lock(&ehca_module.shca_lock);
- list_add(&shca->shca_list, &ehca_module.shca_list);
- spin_unlock(&ehca_module.shca_lock);
-
- EDEB_EX(7, "ret=%x", ret);
+ spin_lock(&shca_list_lock);
+ list_add(&shca->shca_list, &shca_list);
+ spin_unlock(&shca_list_lock);
return 0;
probe7:
ret = ehca_destroy_aqp1(&shca->sport[0]);
if (ret)
- EDEB_ERR(4, "Cannot destroy AQP1 for port 1. ret=%x",
ret);
+ ehca_err(&shca->ib_device,
+ "Cannot destroy AQP1 for port 1. ret=%x", ret);
probe6:
- ib_unregister_device(&shca->ib_device);
+ ret = ehca_dereg_internal_maxmr(shca);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal MR. ret=%x", ret);
probe5:
- ret = ehca_dereg_internal_maxmr(shca);
+ ret = ehca_dealloc_pd(&shca->pd->ib_pd);
if (ret)
- EDEB_ERR(4, "Cannot destroy internal MR. ret=%x", ret);
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal PD. ret=%x", ret);
probe4:
- ret = ehca_dealloc_pd(&shca->pd->ib_pd);
- if (ret != 0)
- EDEB_ERR(4, "Cannot destroy internal PD. ret=%x", ret);
+ ret = ehca_destroy_eq(shca, &shca->neq);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy NEQ. ret=%x", ret);
probe3:
- ret = ehca_destroy_eq(shca, &shca->neq);
- if (ret != 0)
- EDEB_ERR(4, "Cannot destroy NEQ. ret=%x", ret);
+ ret = ehca_destroy_eq(shca, &shca->eq);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy EQ. ret=%x", ret);
probe2:
- ret = ehca_destroy_eq(shca, &shca->eq);
- if (ret != 0)
- EDEB_ERR(4, "Cannot destroy EQ. ret=%x", ret);
+ ib_unregister_device(&shca->ib_device);
probe1:
ib_dealloc_device(&shca->ib_device);
- EDEB_EX(4, "ret=%x", ret);
-
return -EINVAL;
}
@@ -811,18 +673,16 @@ static int __devexit ehca_remove(struct
struct ehca_shca *shca = dev->ofdev.dev.driver_data;
int ret;
- EDEB_EN(7, "shca=%p", shca);
-
ehca_remove_device_sysfs(dev);
if (ehca_open_aqp1 == 1) {
int i;
-
for (i = 0; i < shca->num_ports; i++) {
ret = ehca_destroy_aqp1(&shca->sport[i]);
- if (ret != 0)
- EDEB_ERR(4, "Cannot destroy AQP1 for port
%x."
- " ret=%x", ret, i);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy AQP1 for port %x
"
+ "ret=%x", i, ret);
}
}
@@ -830,27 +690,27 @@ static int __devexit ehca_remove(struct
ret = ehca_dereg_internal_maxmr(shca);
if (ret)
- EDEB_ERR(4, "Cannot destroy internal MR. ret=%x", ret);
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal MR. ret=%x", ret);
ret = ehca_dealloc_pd(&shca->pd->ib_pd);
if (ret)
- EDEB_ERR(4, "Cannot destroy internal PD. ret=%x", ret);
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal PD. ret=%x", ret);
ret = ehca_destroy_eq(shca, &shca->eq);
if (ret)
- EDEB_ERR(4, "Cannot destroy EQ. ret=%x", ret);
+ ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x",
ret);
ret = ehca_destroy_eq(shca, &shca->neq);
if (ret)
- EDEB_ERR(4, "Canot destroy NEQ. ret=%x", ret);
+ ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%x",
ret);
ib_dealloc_device(&shca->ib_device);
- spin_lock(&ehca_module.shca_lock);
+ spin_lock(&shca_list_lock);
list_del(&shca->shca_list);
- spin_unlock(&ehca_module.shca_lock);
-
- EDEB_EX(7, "ret=%x", ret);
+ spin_unlock(&shca_list_lock);
return ret;
}
@@ -871,37 +731,46 @@ static struct ibmebus_driver ehca_driver
.remove = ehca_remove,
};
+void ehca_poll_eqs(unsigned long data)
+{
+ struct ehca_shca *shca;
+
+ spin_lock(&shca_list_lock);
+ list_for_each_entry(shca, &shca_list, shca_list) {
+ if (shca->eq.is_initialized)
+ ehca_tasklet_eq((unsigned long)(void*)shca);
+ }
+ mod_timer(&poll_eqs_timer, jiffies + HZ);
+ spin_unlock(&shca_list_lock);
+}
+
int __init ehca_module_init(void)
{
- int ret = 0;
+ int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Rel.: SVNEHCA_0012)\n");
- EDEB_EN(7, "");
-
+ "(Rel.: SVNEHCA_0015)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
spin_lock_init(&ehca_cq_idr_lock);
- INIT_LIST_HEAD(&ehca_module.shca_list);
- spin_lock_init(&ehca_module.shca_lock);
-
- ehca_init_trace();
+ INIT_LIST_HEAD(&shca_list);
+ spin_lock_init(&shca_list_lock);
if ((ret = ehca_create_comp_pool())) {
- EDEB_ERR(4, "Cannot create comp pool.");
- goto module_init0;
+ ehca_gen_err("Cannot create comp pool.");
+ return ret;
}
- if ((ret = ehca_create_slab_caches(&ehca_module))) {
- EDEB_ERR(4, "Cannot create SLAB caches");
+ if ((ret = ehca_create_slab_caches())) {
+ ehca_gen_err("Cannot create SLAB caches");
ret = -ENOMEM;
goto module_init1;
}
if ((ret = ibmebus_register_driver(&ehca_driver))) {
- EDEB_ERR(4, "Cannot register eHCA device driver");
+ ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
goto module_init2;
}
@@ -909,49 +778,39 @@ int __init ehca_module_init(void)
ehca_create_driver_sysfs(&ehca_driver);
if (ehca_poll_all_eqs != 1) {
- EDEB_ERR(4, "WARNING!!!");
- EDEB_ERR(4, "It is possible to lose interrupts.");
+ ehca_gen_err("WARNING!!!");
+ ehca_gen_err("It is possible to lose interrupts.");
} else {
- init_timer(&ehca_module.timer);
- ehca_module.timer.function = ehca_poll_eqs;
- ehca_module.timer.data = (unsigned long)&ehca_module;
- ehca_module.timer.expires = jiffies + HZ;
- add_timer(&ehca_module.timer);
+ init_timer(&poll_eqs_timer);
+ poll_eqs_timer.function = ehca_poll_eqs;
+ poll_eqs_timer.expires = jiffies + HZ;
+ add_timer(&poll_eqs_timer);
}
- goto module_init0;
+ return 0;
module_init2:
- ehca_destroy_slab_caches(&ehca_module);
+ ehca_destroy_slab_caches();
module_init1:
ehca_destroy_comp_pool();
-
-module_init0:
- EDEB_EX(7, "ret=%x", ret);
-
return ret;
};
void __exit ehca_module_exit(void)
{
- EDEB_EN(7, "");
-
if (ehca_poll_all_eqs == 1)
- del_timer_sync(&ehca_module.timer);
+ del_timer_sync(&poll_eqs_timer);
ehca_remove_driver_sysfs(&ehca_driver);
ibmebus_unregister_driver(&ehca_driver);
- if (ehca_destroy_slab_caches(&ehca_module) != 0)
- EDEB_ERR(4, "Cannot destroy SLAB caches");
+ ehca_destroy_slab_caches();
ehca_destroy_comp_pool();
idr_destroy(&ehca_cq_idr);
idr_destroy(&ehca_qp_idr);
-
- EDEB_EX(7, "");
};
module_init(ehca_module_init);
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mcast.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_mcast.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mcast.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_mcast.c 2006-08-30
20:00:16.000000000 +0200
@@ -42,54 +42,38 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "mcas"
-
#include <linux/module.h>
#include <linux/err.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
-
#include "hcp_if.h"
#define MAX_MC_LID 0xFFFE
#define MIN_MC_LID 0xC000 /* Multicast limits */
#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) (((lid) >= MIN_MC_LID) && ((lid)
<= MAX_MC_LID))
+#define EHCA_VALID_MULTICAST_LID(lid) \
+ (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct ehca_qp *my_qp = NULL;
- struct ehca_shca *shca = NULL;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->device, struct
ehca_shca,
+ ib_device);
union ib_gid my_gid;
- u64 subnet_prefix;
- u64 interface_id;
- u64 h_ret = H_SUCCESS;
- int ret = 0;
-
- EHCA_CHECK_ADR(ibqp);
- EHCA_CHECK_ADR(gid);
-
- my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ u64 subnet_prefix, interface_id, h_ret;
- EHCA_CHECK_QP(my_qp);
if (ibqp->qp_type != IB_QPT_UD) {
- EDEB_ERR(4, "invalid qp_type %x gid, ret=%x",
- ibqp->qp_type, EINVAL);
+ ehca_err(ibqp->device, "invalid qp_type=%x",
ibqp->qp_type);
return -EINVAL;
}
- shca = container_of(ibqp->pd->device, struct ehca_shca,
ib_device);
- EHCA_CHECK_ADR(shca);
-
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
- EDEB_ERR(4, "gid is not valid mulitcast gid ret=%x",
- EINVAL);
+ ehca_err(ibqp->device, "invalid multicast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
- EDEB_ERR(4, "lid=%x is not valid mulitcast lid ret=%x",
- lid, EINVAL);
+ ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
return -EINVAL;
}
@@ -101,100 +85,47 @@ int ehca_attach_mcast(struct ib_qp *ibqp
my_qp->ipz_qp_handle,
my_qp->galpas.kernel,
lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS) {
- EDEB_ERR(4,
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed
"
"h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
- }
- ret = ehca2ib_return_code(h_ret);
- EDEB_EX(7, "mcast attach ret=%x\n"
- "ehca_qp=%p qp_num=%x lid=%x\n"
- "my_gid= %x %x %x %x\n"
- " %x %x %x %x\n"
- " %x %x %x %x\n"
- " %x %x %x %x\n",
- ret, my_qp, ibqp->qp_num, lid,
- my_gid.raw[0], my_gid.raw[1],
- my_gid.raw[2], my_gid.raw[3],
- my_gid.raw[4], my_gid.raw[5],
- my_gid.raw[6], my_gid.raw[7],
- my_gid.raw[8], my_gid.raw[9],
- my_gid.raw[10], my_gid.raw[11],
- my_gid.raw[12], my_gid.raw[13],
- my_gid.raw[14], my_gid.raw[15]);
-
- return ret;
+ return ehca2ib_return_code(h_ret);
}
int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct ehca_qp *my_qp = NULL;
- struct ehca_shca *shca = NULL;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->pd->device,
+ struct ehca_shca,
ib_device);
union ib_gid my_gid;
- u64 subnet_prefix;
- u64 interface_id;
- u64 h_ret = H_SUCCESS;
- int ret = 0;
-
- EHCA_CHECK_ADR(ibqp);
- EHCA_CHECK_ADR(gid);
+ u64 subnet_prefix, interface_id, h_ret;
- my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
- EHCA_CHECK_QP(my_qp);
if (ibqp->qp_type != IB_QPT_UD) {
- EDEB_ERR(4, "invalid qp_type %x gid, ret=%x",
- ibqp->qp_type, EINVAL);
+ ehca_err(ibqp->device, "invalid qp_type %x",
ibqp->qp_type);
return -EINVAL;
}
- shca = container_of(ibqp->pd->device, struct ehca_shca,
ib_device);
- EHCA_CHECK_ADR(shca);
-
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
- EDEB_ERR(4, "gid is not valid mulitcast gid ret=%x",
- EINVAL);
+ ehca_err(ibqp->device, "invalid multicast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
- EDEB_ERR(4, "lid=%x is not valid mulitcast lid ret=%x",
- lid, EINVAL);
+ ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
return -EINVAL;
}
- EDEB_EN(7, "dgid=%p qp_numl=%x lid=%x",
- gid, ibqp->qp_num, lid);
-
memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
- my_qp->ipz_qp_handle,
- my_qp->galpas.kernel,
- lid, subnet_prefix, interface_id);
- if (h_ret != H_SUCCESS) {
- EDEB_ERR(4,
+ my_qp->ipz_qp_handle,
+ my_qp->galpas.kernel,
+ lid, subnet_prefix, interface_id);
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed
"
"h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
- }
- ret = ehca2ib_return_code(h_ret);
-
- EDEB_EX(7, "mcast detach ret=%x\n"
- "ehca_qp=%p qp_num=%x lid=%x\n"
- "my_gid= %x %x %x %x\n"
- " %x %x %x %x\n"
- " %x %x %x %x\n"
- " %x %x %x %x\n",
- ret, my_qp, ibqp->qp_num, lid,
- my_gid.raw[0], my_gid.raw[1],
- my_gid.raw[2], my_gid.raw[3],
- my_gid.raw[4], my_gid.raw[5],
- my_gid.raw[6], my_gid.raw[7],
- my_gid.raw[8], my_gid.raw[9],
- my_gid.raw[10], my_gid.raw[11],
- my_gid.raw[12], my_gid.raw[13],
- my_gid.raw[14], my_gid.raw[15]);
- return ret;
+ return ehca2ib_return_code(h_ret);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mrmw.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_mrmw.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mrmw.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_mrmw.c 2006-08-30
20:00:16.000000000 +0200
@@ -39,9 +39,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#undef DEB_PREFIX
-#define DEB_PREFIX "mrmw"
-
#include <asm/current.h>
#include "ehca_iverbs.h"
@@ -49,78 +46,62 @@
#include "hcp_if.h"
#include "hipz_hw.h"
-extern int ehca_use_hp_mr;
+static struct kmem_cache *mr_cache;
+static struct kmem_cache *mw_cache;
static struct ehca_mr *ehca_mr_new(void)
{
- extern struct ehca_module ehca_module;
struct ehca_mr *me;
- me = kmem_cache_alloc(ehca_module.cache_mr, SLAB_KERNEL);
+ me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
if (me) {
memset(me, 0, sizeof(struct ehca_mr));
spin_lock_init(&me->mrlock);
- EDEB_EX(7, "ehca_mr=%p sizeof(ehca_mr_t)=%x", me,
- (u32) sizeof(struct ehca_mr));
- } else {
- EDEB_ERR(3, "alloc failed");
- }
+ } else
+ ehca_gen_err("alloc failed");
return me;
}
static void ehca_mr_delete(struct ehca_mr *me)
{
- extern struct ehca_module ehca_module;
-
- kmem_cache_free(ehca_module.cache_mr, me);
+ kmem_cache_free(mr_cache, me);
}
static struct ehca_mw *ehca_mw_new(void)
{
- extern struct ehca_module ehca_module;
struct ehca_mw *me;
- me = kmem_cache_alloc(ehca_module.cache_mw, SLAB_KERNEL);
+ me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
if (me) {
memset(me, 0, sizeof(struct ehca_mw));
spin_lock_init(&me->mwlock);
- EDEB_EX(7, "ehca_mw=%p sizeof(ehca_mw_t)=%x", me,
- (u32) sizeof(struct ehca_mw));
- } else {
- EDEB_ERR(3, "alloc failed");
- }
+ } else
+ ehca_gen_err("alloc failed");
return me;
}
static void ehca_mw_delete(struct ehca_mw *me)
{
- extern struct ehca_module ehca_module;
-
- kmem_cache_free(ehca_module.cache_mw, me);
+ kmem_cache_free(mw_cache, me);
}
/*----------------------------------------------------------------------*/
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
- struct ib_mr *ib_mr = NULL;
- int ret = 0;
- struct ehca_mr *e_maxmr = NULL;
- struct ehca_pd *e_pd = NULL;
- struct ehca_shca *shca = NULL;
-
- EDEB_EN(7, "pd=%p mr_access_flags=%x", pd, mr_access_flags);
-
- EHCA_CHECK_PD_P(pd);
- e_pd = container_of(pd, struct ehca_pd, ib_pd);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
+ struct ib_mr *ib_mr;
+ int ret;
+ struct ehca_mr *e_maxmr;
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
if (shca->maxmr) {
e_maxmr = ehca_mr_new();
if (!e_maxmr) {
- EDEB_ERR(4, "out of memory");
+ ehca_err(&shca->ib_device, "out of memory");
ib_mr = ERR_PTR(-ENOMEM);
goto get_dma_mr_exit0;
}
@@ -135,18 +116,15 @@ struct ib_mr *ehca_get_dma_mr(struct ib_
}
ib_mr = &e_maxmr->ib.ib_mr;
} else {
- EDEB_ERR(4, "no internal max-MR exist!");
+ ehca_err(&shca->ib_device, "no internal max-MR exist!");
ib_mr = ERR_PTR(-EINVAL);
goto get_dma_mr_exit0;
}
get_dma_mr_exit0:
if (IS_ERR(ib_mr))
- EDEB_EX(4, "rc=%lx pd=%p mr_access_flags=%x ",
- PTR_ERR(ib_mr), pd, mr_access_flags);
- else
- EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
- ib_mr, ib_mr->lkey, ib_mr->rkey);
+ ehca_err(&shca->ib_device, "rc=%lx pd=%p
mr_access_flags=%x ",
+ PTR_ERR(ib_mr), pd, mr_access_flags);
return ib_mr;
} /* end ehca_get_dma_mr() */
@@ -158,23 +136,20 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
int mr_access_flags,
u64 *iova_start)
{
- struct ib_mr *ib_mr = NULL;
- int ret = 0;
- struct ehca_mr *e_mr = NULL;
- struct ehca_shca *shca = NULL;
- struct ehca_pd *e_pd = NULL;
- u64 size = 0;
+ struct ib_mr *ib_mr;
+ int ret;
+ struct ehca_mr *e_mr;
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+ u64 size;
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- u32 num_pages_mr = 0;
- u32 num_pages_4k = 0; /* 4k portion "pages" */
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
- EDEB_EN(7, "pd=%p phys_buf_array=%p num_phys_buf=%x "
- "mr_access_flags=%x iova_start=%p", pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
-
- EHCA_CHECK_PD_P(pd);
- if ((num_phys_buf <= 0) || ehca_adr_bad(phys_buf_array)) {
- EDEB_ERR(4, "bad input values: num_phys_buf=%x "
+ if ((num_phys_buf <= 0) || !phys_buf_array) {
+ ehca_err(pd->device, "bad input values: num_phys_buf=%x "
"phys_buf_array=%p", num_phys_buf,
phys_buf_array);
ib_mr = ERR_PTR(-EINVAL);
goto reg_phys_mr_exit0;
@@ -187,7 +162,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
* Remote Write Access requires Local Write Access
* Remote Atomic Access requires Local Write Access
*/
- EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+ ehca_err(pd->device, "bad input values:
mr_access_flags=%x",
mr_access_flags);
ib_mr = ERR_PTR(-EINVAL);
goto reg_phys_mr_exit0;
@@ -202,18 +177,15 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
}
if ((size == 0) ||
(((u64)iova_start + size) < (u64)iova_start)) {
- EDEB_ERR(4, "bad input values: size=%lx iova_start=%p",
+ ehca_err(pd->device, "bad input values: size=%lx
iova_start=%p",
size, iova_start);
ib_mr = ERR_PTR(-EINVAL);
goto reg_phys_mr_exit0;
}
- e_pd = container_of(pd, struct ehca_pd, ib_pd);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
-
e_mr = ehca_mr_new();
if (!e_mr) {
- EDEB_ERR(4, "out of memory");
+ ehca_err(pd->device, "out of memory");
ib_mr = ERR_PTR(-ENOMEM);
goto reg_phys_mr_exit0;
}
@@ -253,20 +225,16 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
}
/* successful registration of all pages */
- ib_mr = &e_mr->ib.ib_mr;
- goto reg_phys_mr_exit0;
+ return &e_mr->ib.ib_mr;
reg_phys_mr_exit1:
ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
if (IS_ERR(ib_mr))
- EDEB_EX(4, "rc=%lx pd=%p phys_buf_array=%p "
- "num_phys_buf=%x mr_access_flags=%x
iova_start=%p",
- PTR_ERR(ib_mr), pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- else
- EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
- ib_mr, ib_mr->lkey, ib_mr->rkey);
+ ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
+ "num_phys_buf=%x mr_access_flags=%x
iova_start=%p",
+ PTR_ERR(ib_mr), pd, phys_buf_array,
+ num_phys_buf, mr_access_flags, iova_start);
return ib_mr;
} /* end ehca_reg_phys_mr() */
@@ -277,21 +245,22 @@ struct ib_mr *ehca_reg_user_mr(struct ib
int mr_access_flags,
struct ib_udata *udata)
{
- struct ib_mr *ib_mr = NULL;
- struct ehca_mr *e_mr = NULL;
- struct ehca_shca *shca = NULL;
- struct ehca_pd *e_pd = NULL;
+ struct ib_mr *ib_mr;
+ struct ehca_mr *e_mr;
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- int ret = 0;
- u32 num_pages_mr = 0;
- u32 num_pages_4k = 0; /* 4k portion "pages" */
-
- EDEB_EN(7, "pd=%p region=%p mr_access_flags=%x udata=%p",
- pd, region, mr_access_flags, udata);
-
- EHCA_CHECK_PD_P(pd);
- if (ehca_adr_bad(region)) {
- EDEB_ERR(4, "bad input values: region=%p", region);
+ int ret;
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
+
+ if (!pd) {
+ ehca_gen_err("bad pd=%p", pd);
+ return ERR_PTR(-EFAULT);
+ }
+ if (!region) {
+ ehca_err(pd->device, "bad input values: region=%p",
region);
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
@@ -303,36 +272,29 @@ struct ib_mr *ehca_reg_user_mr(struct ib
* Remote Write Access requires Local Write Access
* Remote Atomic Access requires Local Write Access
*/
- EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+ ehca_err(pd->device, "bad input values:
mr_access_flags=%x",
mr_access_flags);
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
- EDEB(7, "user_base=%lx virt_base=%lx length=%lx offset=%x
page_size=%x "
- "chunk_list.next=%p",
- region->user_base, region->virt_base, region->length,
- region->offset, region->page_size, region->chunk_list.next);
if (region->page_size != PAGE_SIZE) {
- EDEB_ERR(4, "page size not supported,
region->page_size=%x",
- region->page_size);
+ ehca_err(pd->device, "page size not supported, "
+ "region->page_size=%x", region->page_size);
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
if ((region->length == 0) ||
((region->virt_base + region->length) < region->virt_base)) {
- EDEB_ERR(4, "bad input values: length=%lx virt_base=%lx",
- region->length, region->virt_base);
+ ehca_err(pd->device, "bad input values: length=%lx "
+ "virt_base=%lx", region->length,
region->virt_base);
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
- e_pd = container_of(pd, struct ehca_pd, ib_pd);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
-
e_mr = ehca_mr_new();
if (!e_mr) {
- EDEB_ERR(4, "out of memory");
+ ehca_err(pd->device, "out of memory");
ib_mr = ERR_PTR(-ENOMEM);
goto reg_user_mr_exit0;
}
@@ -362,19 +324,15 @@ struct ib_mr *ehca_reg_user_mr(struct ib
}
/* successful registration of all pages */
- ib_mr = &e_mr->ib.ib_mr;
- goto reg_user_mr_exit0;
+ return &e_mr->ib.ib_mr;
reg_user_mr_exit1:
ehca_mr_delete(e_mr);
reg_user_mr_exit0:
if (IS_ERR(ib_mr))
- EDEB_EX(4, "rc=%lx pd=%p region=%p mr_access_flags=%x "
- "udata=%p",
- PTR_ERR(ib_mr), pd, region, mr_access_flags,
udata);
- else
- EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
- ib_mr, ib_mr->lkey, ib_mr->rkey);
+ ehca_err(pd->device, "rc=%lx pd=%p region=%p
mr_access_flags=%x"
+ " udata=%p",
+ PTR_ERR(ib_mr), pd, region, mr_access_flags,
udata);
return ib_mr;
} /* end ehca_reg_user_mr() */
@@ -388,32 +346,26 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
int mr_access_flags,
u64 *iova_start)
{
- int ret = 0;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_mr = NULL;
- u64 new_size = 0;
- u64 *new_start = NULL;
- u32 new_acl = 0;
- struct ehca_pd *new_pd = NULL;
- u32 tmp_lkey = 0;
- u32 tmp_rkey = 0;
+ int ret;
+
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd,
ib_pd);
+ u64 new_size;
+ u64 *new_start;
+ u32 new_acl;
+ struct ehca_pd *new_pd;
+ u32 tmp_lkey, tmp_rkey;
unsigned long sl_flags;
u32 num_pages_mr = 0;
u32 num_pages_4k = 0; /* 4k portion "pages" */
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- struct ehca_pd *my_pd = NULL;
u32 cur_pid = current->tgid;
- EDEB_EN(7, "mr=%p mr_rereg_mask=%x pd=%p phys_buf_array=%p "
- "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
- mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf,
- mr_access_flags, iova_start);
-
- EHCA_CHECK_MR(mr);
- my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
(my_pd->ownpid != cur_pid)) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
ret = -EINVAL;
goto rereg_phys_mr_exit0;
@@ -421,15 +373,19 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
/* TODO not supported, because PHYP rereg hCall needs
pages */
- EDEB_ERR(4, "rereg without IB_MR_REREG_TRANS not supported
yet,"
- " mr_rereg_mask=%x", mr_rereg_mask);
+ ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not
"
+ "supported yet, mr_rereg_mask=%x",
mr_rereg_mask);
ret = -EINVAL;
goto rereg_phys_mr_exit0;
}
- e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
if (mr_rereg_mask & IB_MR_REREG_PD) {
- EHCA_CHECK_PD(pd);
+ if (!pd) {
+ ehca_err(mr->device, "rereg with bad pd, pd=%p "
+ "mr_rereg_mask=%x", pd, mr_rereg_mask);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
}
if ((mr_rereg_mask &
@@ -439,12 +395,10 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
goto rereg_phys_mr_exit0;
}
- shca = container_of(mr->device, struct ehca_shca, ib_device);
-
/* check other parameters */
if (e_mr == shca->maxmr) {
/* should be impossible, however reject to be sure */
- EDEB_ERR(3, "rereg internal max-MR impossible, mr=%p "
+ ehca_err(mr->device, "rereg internal max-MR impossible,
mr=%p "
"shca->maxmr=%p mr->lkey=%x",
mr, shca->maxmr, mr->lkey);
ret = -EINVAL;
@@ -452,14 +406,14 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
}
if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e.
addr/size */
if (e_mr->flags & EHCA_MR_FLAG_FMR) {
- EDEB_ERR(4, "not supported for FMR, mr=%p
flags=%x",
- mr, e_mr->flags);
+ ehca_err(mr->device, "not supported for FMR, mr=%p
"
+ "flags=%x", mr, e_mr->flags);
ret = -EINVAL;
goto rereg_phys_mr_exit0;
}
- if (ehca_adr_bad(phys_buf_array) || num_phys_buf <= 0) {
- EDEB_ERR(4, "bad input values: mr_rereg_mask=%x "
- "phys_buf_array=%p num_phys_buf=%x",
+ if (!phys_buf_array || num_phys_buf <= 0) {
+ ehca_err(mr->device, "bad input values:
mr_rereg_mask=%x"
+ " phys_buf_array=%p num_phys_buf=%x",
mr_rereg_mask, phys_buf_array,
num_phys_buf);
ret = -EINVAL;
goto rereg_phys_mr_exit0;
@@ -474,7 +428,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
* Remote Write Access requires Local Write Access
* Remote Atomic Access requires Local Write Access
*/
- EDEB_ERR(4, "bad input values: mr_rereg_mask=%x "
+ ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
"mr_access_flags=%x", mr_rereg_mask,
mr_access_flags);
ret = -EINVAL;
goto rereg_phys_mr_exit0;
@@ -497,7 +451,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
goto rereg_phys_mr_exit1;
if ((new_size == 0) ||
(((u64)iova_start + new_size) < (u64)iova_start)) {
- EDEB_ERR(4, "bad input values: new_size=%lx "
+ ehca_err(mr->device, "bad input values:
new_size=%lx "
"iova_start=%p", new_size, iova_start);
ret = -EINVAL;
goto rereg_phys_mr_exit1;
@@ -519,10 +473,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
if (mr_rereg_mask & IB_MR_REREG_PD)
new_pd = container_of(pd, struct ehca_pd, ib_pd);
- EDEB(7, "mr=%p new_start=%p new_size=%lx new_acl=%x new_pd=%p "
- "num_pages_mr=%x num_pages_4k=%x", e_mr, new_start, new_size,
- new_acl, new_pd, num_pages_mr, num_pages_4k);
-
ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
if (ret)
@@ -538,17 +488,11 @@ rereg_phys_mr_exit1:
spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
- "phys_buf_array=%p num_phys_buf=%x
mr_access_flags=%x "
- "iova_start=%p",
- ret, mr, mr_rereg_mask, pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- else
- EDEB_EX(7, "mr=%p mr_rereg_mask=%x pd=%p phys_buf_array=%p
"
- "num_phys_buf=%x mr_access_flags=%x
iova_start=%p",
- mr, mr_rereg_mask, pd, phys_buf_array,
num_phys_buf,
- mr_access_flags, iova_start);
-
+ ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p
"
+ "phys_buf_array=%p num_phys_buf=%x
mr_access_flags=%x "
+ "iova_start=%p",
+ ret, mr, mr_rereg_mask, pd, phys_buf_array,
+ num_phys_buf, mr_access_flags, iova_start);
return ret;
} /* end ehca_rereg_phys_mr() */
@@ -557,47 +501,36 @@ rereg_phys_mr_exit0:
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_mr = NULL;
- struct ehca_pd *my_pd = NULL;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
unsigned long sl_flags;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7, "mr=%p mr_attr=%p", mr, mr_attr);
-
- EHCA_CHECK_MR(mr);
-
- my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
(my_pd->ownpid != cur_pid)) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
ret = -EINVAL;
goto query_mr_exit0;
}
- e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- if (ehca_adr_bad(mr_attr)) {
- EDEB_ERR(4, "bad input values: mr_attr=%p", mr_attr);
- ret = -EINVAL;
- goto query_mr_exit0;
- }
if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- EDEB_ERR(4, "not supported for FMR, mr=%p e_mr=%p "
+ ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p
"
"e_mr->flags=%x", mr, e_mr, e_mr->flags);
ret = -EINVAL;
goto query_mr_exit0;
}
- shca = container_of(mr->device, struct ehca_shca, ib_device);
memset(mr_attr, 0, sizeof(struct ib_mr_attr));
spin_lock_irqsave(&e_mr->mrlock, sl_flags);
h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_mr_query failed, h_ret=%lx mr=%p "
+ ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx
mr=%p "
"hca_hndl=%lx mr_hndl=%lx lkey=%x",
h_ret, mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle, mr->lkey);
@@ -615,13 +548,8 @@ query_mr_exit1:
spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x mr=%p mr_attr=%p", ret, mr, mr_attr);
- else
- EDEB_EX(7, "pd=%p device_virt_addr=%lx size=%lx "
- "mr_access_flags=%x lkey=%x rkey=%x",
- mr_attr->pd, mr_attr->device_virt_addr,
- mr_attr->size, mr_attr->mr_access_flags,
- mr_attr->lkey, mr_attr->rkey);
+ ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
+ ret, mr, mr_attr);
return ret;
} /* end ehca_query_mr() */
@@ -630,35 +558,29 @@ query_mr_exit0:
int ehca_dereg_mr(struct ib_mr *mr)
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_mr = NULL;
- struct ehca_pd *my_pd = NULL;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
- EDEB_EN(7, "mr=%p", mr);
-
- EHCA_CHECK_MR(mr);
- my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
(my_pd->ownpid != cur_pid)) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
ret = -EINVAL;
goto dereg_mr_exit0;
}
- e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- shca = container_of(mr->device, struct ehca_shca, ib_device);
-
if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- EDEB_ERR(4, "not supported for FMR, mr=%p e_mr=%p "
+ ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
"e_mr->flags=%x", mr, e_mr, e_mr->flags);
ret = -EINVAL;
goto dereg_mr_exit0;
} else if (e_mr == shca->maxmr) {
/* should be impossible, however reject to be sure */
- EDEB_ERR(3, "dereg internal max-MR impossible, mr=%p "
+ ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
"shca->maxmr=%p mr->lkey=%x",
mr, shca->maxmr, mr->lkey);
ret = -EINVAL;
@@ -668,8 +590,8 @@ int ehca_dereg_mr(struct ib_mr *mr)
/* TODO: BUSY: MR still has bound window(s) */
h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx shca=%p e_mr=%p"
- " hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+ ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
+ "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle, mr->lkey);
ret = ehca_mrmw_map_hrc_free_mr(h_ret);
@@ -681,9 +603,7 @@ int ehca_dereg_mr(struct ib_mr *mr)
dereg_mr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x mr=%p", ret, mr);
- else
- EDEB_EX(7, "");
+ ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
return ret;
} /* end ehca_dereg_mr() */
@@ -691,19 +611,14 @@ dereg_mr_exit0:
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
- struct ib_mw *ib_mw = NULL;
- u64 h_ret = H_SUCCESS;
- struct ehca_shca *shca = NULL;
- struct ehca_mw *e_mw = NULL;
- struct ehca_pd *e_pd = NULL;
+ struct ib_mw *ib_mw;
+ u64 h_ret;
+ struct ehca_mw *e_mw;
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
struct ehca_mw_hipzout_parms hipzout = {{0},0};
- EDEB_EN(7, "pd=%p", pd);
-
- EHCA_CHECK_PD_P(pd);
- e_pd = container_of(pd, struct ehca_pd, ib_pd);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
-
e_mw = ehca_mw_new();
if (!e_mw) {
ib_mw = ERR_PTR(-ENOMEM);
@@ -713,25 +628,22 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd
h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
e_pd->fw_pd, &hipzout);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_mw_allocate failed, h_ret=%lx shca=%p "
- "hca_hndl=%lx mw=%p", h_ret, shca,
- shca->ipz_hca_handle.handle, e_mw);
+ ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
+ "shca=%p hca_hndl=%lx mw=%p",
+ h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
goto alloc_mw_exit1;
}
/* successful MW allocation */
e_mw->ipz_mw_handle = hipzout.handle;
e_mw->ib_mw.rkey = hipzout.rkey;
- ib_mw = &e_mw->ib_mw;
- goto alloc_mw_exit0;
+ return &e_mw->ib_mw;
alloc_mw_exit1:
ehca_mw_delete(e_mw);
alloc_mw_exit0:
if (IS_ERR(ib_mw))
- EDEB_EX(4, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
- else
- EDEB_EX(7, "ib_mw=%p rkey=%x", ib_mw, ib_mw->rkey);
+ ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
return ib_mw;
} /* end ehca_alloc_mw() */
@@ -741,55 +653,32 @@ int ehca_bind_mw(struct ib_qp *qp,
struct ib_mw *mw,
struct ib_mw_bind *mw_bind)
{
- int ret = 0;
-
/* TODO: not supported up to now */
- EDEB_ERR(4, "bind MW currently not supported by HCAD");
- ret = -EPERM;
- goto bind_mw_exit0;
+ ehca_gen_err("bind MW currently not supported by HCAD");
-bind_mw_exit0:
- if (ret)
- EDEB_EX(4, "ret=%x qp=%p mw=%p mw_bind=%p",
- ret, qp, mw, mw_bind);
- else
- EDEB_EX(7, "qp=%p mw=%p mw_bind=%p", qp, mw, mw_bind);
- return ret;
+ return -EPERM;
} /* end ehca_bind_mw() */
/*----------------------------------------------------------------------*/
int ehca_dealloc_mw(struct ib_mw *mw)
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- struct ehca_shca *shca = NULL;
- struct ehca_mw *e_mw = NULL;
-
- EDEB_EN(7, "mw=%p", mw);
-
- EHCA_CHECK_MW(mw);
- e_mw = container_of(mw, struct ehca_mw, ib_mw);
- shca = container_of(mw->device, struct ehca_shca, ib_device);
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mw->device, struct ehca_shca, ib_device);
+ struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_free_mw failed, h_ret=%lx shca=%p mw=%p "
- "rkey=%x hca_hndl=%lx mw_hndl=%lx",
+ ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
+ "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
h_ret, shca, mw, mw->rkey,
shca->ipz_hca_handle.handle,
e_mw->ipz_mw_handle.handle);
- ret = ehca_mrmw_map_hrc_free_mw(h_ret);
- goto dealloc_mw_exit0;
+ return ehca_mrmw_map_hrc_free_mw(h_ret);
}
/* successful deallocation */
ehca_mw_delete(e_mw);
-
-dealloc_mw_exit0:
- if (ret)
- EDEB_EX(4, "ret=%x mw=%p", ret, mw);
- else
- EDEB_EX(7, "");
- return ret;
+ return 0;
} /* end ehca_dealloc_mw() */
/*----------------------------------------------------------------------*/
@@ -798,28 +687,15 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
{
- struct ib_fmr *ib_fmr = NULL;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_fmr = NULL;
- int ret = 0;
- struct ehca_pd *e_pd = NULL;
- u32 tmp_lkey = 0;
- u32 tmp_rkey = 0;
+ struct ib_fmr *ib_fmr;
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_mr *e_fmr;
+ int ret;
+ u32 tmp_lkey, tmp_rkey;
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- EDEB_EN(7, "pd=%p mr_access_flags=%x fmr_attr=%p",
- pd, mr_access_flags, fmr_attr);
-
- EHCA_CHECK_PD_P(pd);
- if (ehca_adr_bad(fmr_attr)) {
- EDEB_ERR(4, "bad input values: fmr_attr=%p", fmr_attr);
- ib_fmr = ERR_PTR(-EINVAL);
- goto alloc_fmr_exit0;
- }
-
- EDEB(7, "max_pages=%x max_maps=%x page_shift=%x",
- fmr_attr->max_pages, fmr_attr->max_maps,
fmr_attr->page_shift);
-
/* check other parameters */
if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
!(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
@@ -829,19 +705,19 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
* Remote Write Access requires Local Write Access
* Remote Atomic Access requires Local Write Access
*/
- EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
mr_access_flags);
ib_fmr = ERR_PTR(-EINVAL);
goto alloc_fmr_exit0;
}
if (mr_access_flags & IB_ACCESS_MW_BIND) {
- EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
mr_access_flags);
ib_fmr = ERR_PTR(-EINVAL);
goto alloc_fmr_exit0;
}
if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
- EDEB_ERR(4, "bad input values: fmr_attr->max_pages=%x "
+ ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
"fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
fmr_attr->max_pages, fmr_attr->max_maps,
fmr_attr->page_shift);
@@ -850,15 +726,12 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
}
if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
- EDEB_ERR(4, "unsupported fmr_attr->page_shift=%x",
+ ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
fmr_attr->page_shift);
ib_fmr = ERR_PTR(-EINVAL);
goto alloc_fmr_exit0;
}
- e_pd = container_of(pd, struct ehca_pd, ib_pd);
- shca = container_of(pd->device, struct ehca_shca, ib_device);
-
e_fmr = ehca_mr_new();
if (!e_fmr) {
ib_fmr = ERR_PTR(-ENOMEM);
@@ -881,19 +754,15 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
e_fmr->fmr_max_pages = fmr_attr->max_pages;
e_fmr->fmr_max_maps = fmr_attr->max_maps;
e_fmr->fmr_map_cnt = 0;
- ib_fmr = &e_fmr->ib.ib_fmr;
- goto alloc_fmr_exit0;
+ return &e_fmr->ib.ib_fmr;
alloc_fmr_exit1:
ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
if (IS_ERR(ib_fmr))
- EDEB_EX(4, "rc=%lx pd=%p mr_access_flags=%x "
- "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
- mr_access_flags, fmr_attr);
- else
- EDEB_EX(7, "ib_fmr=%p tmp_lkey=%x tmp_rkey=%x",
- ib_fmr, tmp_lkey, tmp_rkey);
+ ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
+ "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
+ mr_access_flags, fmr_attr);
return ib_fmr;
} /* end ehca_alloc_fmr() */
@@ -904,24 +773,16 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr
int list_len,
u64 iova)
{
- int ret = 0;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_fmr = NULL;
- struct ehca_pd *e_pd = NULL;
+ int ret;
+ struct ehca_shca *shca =
+ container_of(fmr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr,
ib.ib_fmr);
+ struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd,
ib_pd);
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- u32 tmp_lkey = 0;
- u32 tmp_rkey = 0;
-
- EDEB_EN(7, "fmr=%p page_list=%p list_len=%x iova=%lx",
- fmr, page_list, list_len, iova);
-
- EHCA_CHECK_FMR(fmr);
- e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
- shca = container_of(fmr->device, struct ehca_shca, ib_device);
- e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
+ u32 tmp_lkey, tmp_rkey;
if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+ ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
e_fmr, e_fmr->flags);
ret = -EINVAL;
goto map_phys_fmr_exit0;
@@ -931,16 +792,16 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr
goto map_phys_fmr_exit0;
if (iova % e_fmr->fmr_page_size) {
/* only whole-numbered pages */
- EDEB_ERR(4, "bad iova, iova=%lx fmr_page_size=%x",
+ ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
iova, e_fmr->fmr_page_size);
ret = -EINVAL;
goto map_phys_fmr_exit0;
}
if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
/* HCAD does not limit the maps, however trace this anyway
*/
- EDEB(6, "map limit exceeded, fmr=%p e_fmr->fmr_map_cnt=%x
"
- "e_fmr->fmr_max_maps=%x",
- fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
+ ehca_info(fmr->device, "map limit exceeded, fmr=%p "
+ "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
+ fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
}
pginfo.type = EHCA_MR_PGI_FMR;
@@ -960,14 +821,13 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr
e_fmr->fmr_map_cnt++;
e_fmr->ib.ib_fmr.lkey = tmp_lkey;
e_fmr->ib.ib_fmr.rkey = tmp_rkey;
+ return 0;
map_phys_fmr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x fmr=%p page_list=%p list_len=%x
iova=%lx",
- ret, fmr, page_list, list_len, iova);
- else
- EDEB_EX(7, "lkey=%x rkey=%x",
- e_fmr->ib.ib_fmr.lkey, e_fmr->ib.ib_fmr.rkey);
+ ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p
list_len=%x "
+ "iova=%lx",
+ ret, fmr, page_list, list_len, iova);
return ret;
} /* end ehca_map_phys_fmr() */
@@ -976,31 +836,34 @@ map_phys_fmr_exit0:
int ehca_unmap_fmr(struct list_head *fmr_list)
{
int ret = 0;
- struct ib_fmr *ib_fmr = NULL;
+ struct ib_fmr *ib_fmr;
struct ehca_shca *shca = NULL;
- struct ehca_shca *prev_shca = NULL;
- struct ehca_mr *e_fmr = NULL;
+ struct ehca_shca *prev_shca;
+ struct ehca_mr *e_fmr;
u32 num_fmr = 0;
u32 unmap_fmr_cnt = 0;
- EDEB_EN(7, "fmr_list=%p", fmr_list);
-
/* check all FMR belong to same SHCA, and check internal flag */
list_for_each_entry(ib_fmr, fmr_list, list) {
prev_shca = shca;
+ if (!ib_fmr) {
+ ehca_gen_err("bad fmr=%p in list", ib_fmr);
+ ret = -EINVAL;
+ goto unmap_fmr_exit0;
+ }
shca = container_of(ib_fmr->device, struct ehca_shca,
ib_device);
- EHCA_CHECK_FMR(ib_fmr);
e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
if ((shca != prev_shca) && prev_shca) {
- EDEB_ERR(4, "SHCA mismatch, shca=%p prev_shca=%p "
- "e_fmr=%p", shca, prev_shca, e_fmr);
+ ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p
"
+ "prev_shca=%p e_fmr=%p",
+ shca, prev_shca, e_fmr);
ret = -EINVAL;
goto unmap_fmr_exit0;
}
if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
- e_fmr, e_fmr->flags);
+ ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
+ "e_fmr->flags=%x", e_fmr, e_fmr->flags);
ret = -EINVAL;
goto unmap_fmr_exit0;
}
@@ -1016,20 +879,18 @@ int ehca_unmap_fmr(struct list_head *fmr
ret = ehca_unmap_one_fmr(shca, e_fmr);
if (ret) {
/* unmap failed, stop unmapping of rest of FMRs */
- EDEB_ERR(4, "unmap of one FMR failed, stop rest, "
- "e_fmr=%p num_fmr=%x unmap_fmr_cnt=%x
lkey=%x",
- e_fmr, num_fmr, unmap_fmr_cnt,
- e_fmr->ib.ib_fmr.lkey);
+ ehca_err(&shca->ib_device, "unmap of one FMR
failed, "
+ "stop rest, e_fmr=%p num_fmr=%x "
+ "unmap_fmr_cnt=%x lkey=%x", e_fmr,
num_fmr,
+ unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
goto unmap_fmr_exit0;
}
}
unmap_fmr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x fmr_list=%p num_fmr=%x
unmap_fmr_cnt=%x",
- ret, fmr_list, num_fmr, unmap_fmr_cnt);
- else
- EDEB_EX(7, "num_fmr=%x", num_fmr);
+ ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x
unmap_fmr_cnt=%x",
+ ret, fmr_list, num_fmr, unmap_fmr_cnt);
return ret;
} /* end ehca_unmap_fmr() */
@@ -1037,19 +898,14 @@ unmap_fmr_exit0:
int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- struct ehca_shca *shca = NULL;
- struct ehca_mr *e_fmr = NULL;
-
- EDEB_EN(7, "fmr=%p", fmr);
-
- EHCA_CHECK_FMR(fmr);
- e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
- shca = container_of(fmr->device, struct ehca_shca, ib_device);
+ int ret;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(fmr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr,
ib.ib_fmr);
if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
- EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+ ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
e_fmr, e_fmr->flags);
ret = -EINVAL;
goto free_fmr_exit0;
@@ -1057,21 +913,20 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
+ ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
"hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
h_ret, e_fmr, shca->ipz_hca_handle.handle,
e_fmr->ipz_mr_handle.handle, fmr->lkey);
- ehca_mrmw_map_hrc_free_mr(h_ret);
+ ret = ehca_mrmw_map_hrc_free_mr(h_ret);
goto free_fmr_exit0;
}
/* successful deregistration */
ehca_mr_delete(e_fmr);
+ return 0;
free_fmr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x fmr=%p", ret, fmr);
- else
- EDEB_EX(7, "");
+ ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
return ret;
} /* end ehca_dealloc_fmr() */
@@ -1087,15 +942,11 @@ int ehca_reg_mr(struct ehca_shca *shca,
u32 *lkey, /*OUT*/
u32 *rkey) /*OUT*/
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- u32 hipz_acl = 0;
+ int ret;
+ u64 h_ret;
+ u32 hipz_acl;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x e_pd=%p
"
- "pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr,
iova_start,
- size, acl, e_pd, pginfo, pginfo->num_pages,
pginfo->num_4k);
-
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
if (ehca_use_hp_mr == 1)
@@ -1105,8 +956,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
(u64)iova_start, size, hipz_acl,
e_pd->fw_pd, &hipzout);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_alloc_mr failed, h_ret=%lx
hca_hndl=%lx",
- h_ret, shca->ipz_hca_handle.handle);
+ ehca_err(&shca->ib_device, "hipz_alloc_mr failed,
h_ret=%lx "
+ "hca_hndl=%lx", h_ret,
shca->ipz_hca_handle.handle);
ret = ehca_mrmw_map_hrc_alloc(h_ret);
goto ehca_reg_mr_exit0;
}
@@ -1125,26 +976,27 @@ int ehca_reg_mr(struct ehca_shca *shca,
e_mr->acl = acl;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
- goto ehca_reg_mr_exit0;
+ return 0;
ehca_reg_mr_exit1:
h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(1, "h_ret=%lx shca=%p e_mr=%p iova_start=%p "
- "size=%lx acl=%x e_pd=%p lkey=%x pginfo=%p "
- "num_pages=%lx num_4k=%lx ret=%x", h_ret, shca,
e_mr,
- iova_start, size, acl, e_pd, hipzout.lkey,
pginfo,
- pginfo->num_pages, pginfo->num_4k, ret);
- EDEB_ERR(1, "internal error in ehca_reg_mr, not
recoverable");
+ ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
+ "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+ h_ret, shca, e_mr, iova_start, size, acl, e_pd,
+ hipzout.lkey, pginfo, pginfo->num_pages,
+ pginfo->num_4k, ret);
+ ehca_err(&shca->ib_device, "internal error in ehca_reg_mr,
"
+ "not recoverable");
}
ehca_reg_mr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx
"
- "acl=%x e_pd=%p pginfo=%p num_pages=%lx
num_4k=%lx",
- ret, shca, e_mr, iova_start, size, acl, e_pd,
pginfo,
- pginfo->num_pages, pginfo->num_4k);
- else
- EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p
"
+ "num_pages=%lx num_4k=%lx",
+ ret, shca, e_mr, iova_start, size, acl, e_pd,
pginfo,
+ pginfo->num_pages, pginfo->num_4k);
return ret;
} /* end ehca_reg_mr() */
@@ -1155,18 +1007,15 @@ int ehca_reg_mr_rpages(struct ehca_shca
struct ehca_mr_pginfo *pginfo)
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
- u32 rnum = 0;
- u64 rpage = 0;
+ u64 h_ret;
+ u32 rnum;
+ u64 rpage;
u32 i;
- u64 *kpage = NULL;
-
- EDEB_EN(7, "shca=%p e_mr=%p pginfo=%p num_pages=%lx num_4k=%lx",
- shca, e_mr, pginfo, pginfo->num_pages, pginfo->num_4k);
+ u64 *kpage;
kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!kpage) {
- EDEB_ERR(4, "kpage alloc failed");
+ ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
goto ehca_reg_mr_rpages_exit0;
}
@@ -1184,29 +1033,29 @@ int ehca_reg_mr_rpages(struct ehca_shca
if (rnum > 1) {
ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
if (ret) {
- EDEB_ERR(4, "ehca_set_pagebuf bad rc,
ret=%x "
- "rnum=%x kpage=%p", ret, rnum,
kpage);
+ ehca_err(&shca->ib_device,
"ehca_set_pagebuf "
+ "bad rc, ret=%x rnum=%x
kpage=%p",
+ ret, rnum, kpage);
ret = -EFAULT;
goto ehca_reg_mr_rpages_exit1;
}
rpage = virt_to_abs(kpage);
if (!rpage) {
- EDEB_ERR(4, "kpage=%p i=%x", kpage, i);
+ ehca_err(&shca->ib_device, "kpage=%p
i=%x",
+ kpage, i);
ret = -EFAULT;
goto ehca_reg_mr_rpages_exit1;
}
} else { /* rnum==1 */
ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
if (ret) {
- EDEB_ERR(4, "ehca_set_pagebuf_1 bad rc, "
- "ret=%x i=%x", ret, i);
+ ehca_err(&shca->ib_device,
"ehca_set_pagebuf_1 "
+ "bad rc, ret=%x i=%x", ret, i);
ret = -EFAULT;
goto ehca_reg_mr_rpages_exit1;
}
}
- EDEB(9, "i=%x rnum=%x rpage=%lx", i, rnum, rpage);
-
h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle,
e_mr,
0, /* pagesize 4k */
0, rpage, rnum);
@@ -1217,9 +1066,10 @@ int ehca_reg_mr_rpages(struct ehca_shca
* and for 'page registered'==H_PAGE_REGISTERED
*/
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "last hipz_reg_rpage_mr
failed, "
- "h_ret=%lx e_mr=%p i=%x
hca_hndl=%lx "
- "mr_hndl=%lx lkey=%x", h_ret,
e_mr, i,
+ ehca_err(&shca->ib_device, "last "
+ "hipz_reg_rpage_mr failed,
h_ret=%lx "
+ "e_mr=%p i=%x hca_hndl=%lx
mr_hndl=%lx"
+ " lkey=%x", h_ret, e_mr, i,
shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle,
e_mr->ib.ib_mr.lkey);
@@ -1228,8 +1078,8 @@ int ehca_reg_mr_rpages(struct ehca_shca
} else
ret = 0;
} else if (h_ret != H_PAGE_REGISTERED) {
- EDEB_ERR(4, "hipz_reg_rpage_mr failed, h_ret=%lx "
- "e_mr=%p i=%x lkey=%x hca_hndl=%lx "
+ ehca_err(&shca->ib_device, "hipz_reg_rpage_mr
failed, "
+ "h_ret=%lx e_mr=%p i=%x lkey=%x
hca_hndl=%lx "
"mr_hndl=%lx", h_ret, e_mr, i,
e_mr->ib.ib_mr.lkey,
shca->ipz_hca_handle.handle,
@@ -1245,11 +1095,9 @@ ehca_reg_mr_rpages_exit1:
kfree(kpage);
ehca_reg_mr_rpages_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p e_mr=%p pginfo=%p num_pages=%lx
"
- "num_4k=%lx", ret, shca, e_mr, pginfo,
- pginfo->num_pages, pginfo->num_4k);
- else
- EDEB_EX(7, "ret=%x", ret);
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p
pginfo=%p "
+ "num_pages=%lx num_4k=%lx", ret, shca, e_mr,
pginfo,
+ pginfo->num_pages, pginfo->num_4k);
return ret;
} /* end ehca_reg_mr_rpages() */
@@ -1265,25 +1113,20 @@ inline int ehca_rereg_mr_rereg1(struct e
u32 *lkey, /*OUT*/
u32 *rkey) /*OUT*/
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- u32 hipz_acl = 0;
- u64 *kpage = NULL;
- u64 rpage = 0;
+ int ret;
+ u64 h_ret;
+ u32 hipz_acl;
+ u64 *kpage;
+ u64 rpage;
struct ehca_mr_pginfo pginfo_save;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x "
- "e_pd=%p pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr,
- iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
- pginfo->num_4k);
-
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (!kpage) {
- EDEB_ERR(4, "kpage alloc failed");
+ ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
goto ehca_rereg_mr_rereg1_exit0;
}
@@ -1291,14 +1134,15 @@ inline int ehca_rereg_mr_rereg1(struct e
pginfo_save = *pginfo;
ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
if (ret) {
- EDEB_ERR(4, "set pagebuf failed, e_mr=%p pginfo=%p type=%x
"
- "num_pages=%lx num_4k=%lx kpage=%p", e_mr,
pginfo,
- pginfo->type, pginfo->num_pages,
pginfo->num_4k,kpage);
+ ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
+ "pginfo=%p type=%x num_pages=%lx num_4k=%lx
kpage=%p",
+ e_mr, pginfo, pginfo->type, pginfo->num_pages,
+ pginfo->num_4k,kpage);
goto ehca_rereg_mr_rereg1_exit1;
}
rpage = virt_to_abs(kpage);
if (!rpage) {
- EDEB_ERR(4, "kpage=%p", kpage);
+ ehca_err(&shca->ib_device, "kpage=%p", kpage);
ret = -EFAULT;
goto ehca_rereg_mr_rereg1_exit1;
}
@@ -1311,13 +1155,13 @@ inline int ehca_rereg_mr_rereg1(struct e
* e.g. this is required in case H_MR_CONDITION
* (MW bound or MR is shared)
*/
- EDEB(6, "hipz_h_reregister_pmr failed (Rereg1), h_ret=%lx
"
- "e_mr=%p", h_ret, e_mr);
+ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed
"
+ "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
*pginfo = pginfo_save;
ret = -EAGAIN;
} else if ((u64*)hipzout.vaddr != iova_start) {
- EDEB_ERR(4, "PHYP changed iova_start in rereg_pmr, "
- "iova_start=%p iova_start_out=%lx e_mr=%p "
+ ehca_err(&shca->ib_device, "PHYP changed iova_start in "
+ "rereg_pmr, iova_start=%p iova_start_out=%lx
e_mr=%p "
"mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
e_mr->ib.ib_mr.lkey, hipzout.lkey);
@@ -1340,13 +1184,10 @@ ehca_rereg_mr_rereg1_exit1:
kfree(kpage);
ehca_rereg_mr_rereg1_exit0:
if ( ret && (ret != -EAGAIN) )
- EDEB_EX(4, "ret=%x h_ret=%lx lkey=%x rkey=%x pginfo=%p "
- "num_pages=%lx num_4k=%lx", ret, h_ret, *lkey,
*rkey,
- pginfo, pginfo->num_pages, pginfo->num_4k);
- else
- EDEB_EX(7, "ret=%x h_ret=%lx lkey=%x rkey=%x pginfo=%p "
- "num_pages=%lx num_4k=%lx", ret, h_ret, *lkey,
*rkey,
- pginfo, pginfo->num_pages, pginfo->num_4k);
+ ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+ "pginfo=%p num_pages=%lx num_4k=%lx",
+ ret, *lkey, *rkey, pginfo, pginfo->num_pages,
+ pginfo->num_4k);
return ret;
} /* end ehca_rereg_mr_rereg1() */
@@ -1363,20 +1204,15 @@ int ehca_rereg_mr(struct ehca_shca *shca
u32 *rkey)
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
+ u64 h_ret;
int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration
*/
- EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x "
- "e_pd=%p pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr,
- iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
- pginfo->num_4k);
-
/* first determine reregistration hCall(s) */
if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
(pginfo->num_4k > e_mr->num_4k)) {
- EDEB(7, "Rereg3 case, pginfo->num_4k=%lx "
- "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+ ehca_dbg(&shca->ib_device, "Rereg3 case,
pginfo->num_4k=%lx "
+ "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
rereg_1_hcall = 0;
rereg_3_hcall = 1;
}
@@ -1385,7 +1221,8 @@ int ehca_rereg_mr(struct ehca_shca *shca
rereg_1_hcall = 0;
rereg_3_hcall = 1;
e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
- EDEB(4, "Rereg MR for max-MR! e_mr=%p", e_mr);
+ ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
+ e_mr);
}
if (rereg_1_hcall) {
@@ -1405,8 +1242,9 @@ int ehca_rereg_mr(struct ehca_shca *shca
/* first deregister old MR */
h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle,
e_mr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx
e_mr=%p "
- "hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+ "h_ret=%lx e_mr=%p hca_hndl=%lx
mr_hndl=%lx "
+ "mr->lkey=%x",
h_ret, e_mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle,
e_mr->ib.ib_mr.lkey);
@@ -1436,18 +1274,12 @@ int ehca_rereg_mr(struct ehca_shca *shca
ehca_rereg_mr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx
"
- "acl=%x e_pd=%p pginfo=%p num_pages=%lx lkey=%x
rkey=%x"
- " rereg_1_hcall=%x rereg_3_hcall=%x", ret, shca,
e_mr,
- iova_start, size, acl, e_pd, pginfo,
pginfo->num_pages,
- *lkey, *rkey, rereg_1_hcall, rereg_3_hcall);
- else
- EDEB_EX(7, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx
"
- "acl=%x e_pd=%p pginfo=%p num_pages=%lx lkey=%x
rkey=%x"
- " rereg_1_hcall=%x rereg_3_hcall=%x", ret, shca,
e_mr,
- iova_start, size, acl, e_pd, pginfo,
pginfo->num_pages,
- *lkey, *rkey, rereg_1_hcall, rereg_3_hcall);
-
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p
"
+ "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+ "rereg_3_hcall=%x", ret, shca, e_mr, iova_start,
size,
+ acl, e_pd, pginfo, pginfo->num_pages, *lkey,
*rkey,
+ rereg_1_hcall, rereg_3_hcall);
return ret;
} /* end ehca_rereg_mr() */
@@ -1457,26 +1289,22 @@ int ehca_unmap_one_fmr(struct ehca_shca
struct ehca_mr *e_fmr)
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
+ u64 h_ret;
int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
- struct ehca_pd *e_pd = NULL;
+ struct ehca_pd *e_pd =
+ container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
struct ehca_mr save_fmr;
- u32 tmp_lkey = 0;
- u32 tmp_rkey = 0;
+ u32 tmp_lkey, tmp_rkey;
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7, "shca=%p e_fmr=%p", shca, e_fmr);
-
/* first check if reregistration hCall can be used for unmap */
if (e_fmr->fmr_max_pages > 512) {
rereg_1_hcall = 0;
rereg_3_hcall = 1;
}
- e_pd = container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
-
if (rereg_1_hcall) {
/*
* note: after using rereg hcall with len=0,
@@ -1489,10 +1317,10 @@ int ehca_unmap_one_fmr(struct ehca_shca
* should not happen, because length checked
above,
* FMRs are not shared and no MW bound to FMRs
*/
- EDEB_ERR(4, "hipz_reregister_pmr failed (Rereg1),
"
- "h_ret=%lx e_fmr=%p hca_hndl=%lx
mr_hndl=%lx "
- "lkey=%x lkey_out=%x", h_ret, e_fmr,
- shca->ipz_hca_handle.handle,
+ ehca_err(&shca->ib_device, "hipz_reregister_pmr
failed "
+ "(Rereg1), h_ret=%lx e_fmr=%p
hca_hndl=%lx "
+ "mr_hndl=%lx lkey=%x lkey_out=%x",
+ h_ret, e_fmr,
shca->ipz_hca_handle.handle,
e_fmr->ipz_mr_handle.handle,
e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
rereg_3_hcall = 1;
@@ -1511,9 +1339,10 @@ int ehca_unmap_one_fmr(struct ehca_shca
/* first free old FMR */
h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle,
e_fmr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx
e_fmr=%p "
- "hca_hndl=%lx mr_hndl=%lx lkey=%x",
h_ret,
- e_fmr, shca->ipz_hca_handle.handle,
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+ "h_ret=%lx e_fmr=%p hca_hndl=%lx
mr_hndl=%lx "
+ "lkey=%x",
+ h_ret, e_fmr,
shca->ipz_hca_handle.handle,
e_fmr->ipz_mr_handle.handle,
e_fmr->ib.ib_fmr.lkey);
ret = ehca_mrmw_map_hrc_free_mr(h_ret);
@@ -1547,9 +1376,11 @@ int ehca_unmap_one_fmr(struct ehca_shca
}
ehca_unmap_one_fmr_exit0:
- EDEB_EX(7, "ret=%x tmp_lkey=%x tmp_rkey=%x fmr_max_pages=%x "
- "rereg_1_hcall=%x rereg_3_hcall=%x", ret, tmp_lkey,
tmp_rkey,
- e_fmr->fmr_max_pages, rereg_1_hcall, rereg_3_hcall);
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x
"
+ "fmr_max_pages=%x rereg_1_hcall=%x
rereg_3_hcall=%x",
+ ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
+ rereg_1_hcall, rereg_3_hcall);
return ret;
} /* end ehca_unmap_one_fmr() */
@@ -1565,13 +1396,10 @@ int ehca_reg_smr(struct ehca_shca *shca,
u32 *rkey) /*OUT*/
{
int ret = 0;
- u64 h_ret = H_SUCCESS;
- u32 hipz_acl = 0;
+ u64 h_ret;
+ u32 hipz_acl;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7,"shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x
e_pd=%p",
- shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
@@ -1579,10 +1407,11 @@ int ehca_reg_smr(struct ehca_shca *shca,
(u64)iova_start, hipz_acl,
e_pd->fw_pd,
&hipzout);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_reg_smr failed, h_ret=%lx shca=%p
e_origmr=%p"
- " e_newmr=%p iova_start=%p acl=%x e_pd=%p
hca_hndl=%lx"
- " mr_hndl=%lx lkey=%x", h_ret, shca, e_origmr,
e_newmr,
- iova_start, acl, e_pd,
shca->ipz_hca_handle.handle,
+ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx
"
+ "shca=%p e_origmr=%p e_newmr=%p iova_start=%p
acl=%x "
+ "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, shca, e_origmr, e_newmr, iova_start, acl,
e_pd,
+ shca->ipz_hca_handle.handle,
e_origmr->ipz_mr_handle.handle,
e_origmr->ib.ib_mr.lkey);
ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
@@ -1597,15 +1426,13 @@ int ehca_reg_smr(struct ehca_shca *shca,
e_newmr->ipz_mr_handle = hipzout.handle;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
- goto ehca_reg_smr_exit0;
+ return 0;
ehca_reg_smr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p e_origmr=%p e_newmr=%p "
- "iova_start=%p acl=%x e_pd=%p",
- ret, shca, e_origmr, e_newmr, iova_start, acl,
e_pd);
- else
- EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
+ "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
+ ret, shca, e_origmr, e_newmr, iova_start, acl,
e_pd);
return ret;
} /* end ehca_reg_smr() */
@@ -1617,27 +1444,18 @@ int ehca_reg_internal_maxmr(
struct ehca_pd *e_pd,
struct ehca_mr **e_maxmr) /*OUT*/
{
- int ret = 0;
- struct ehca_mr *e_mr = NULL;
- u64 *iova_start = NULL;
- u64 size_maxmr = 0;
+ int ret;
+ struct ehca_mr *e_mr;
+ u64 *iova_start;
+ u64 size_maxmr;
struct ehca_mr_pginfo
pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
struct ib_phys_buf ib_pbuf;
- u32 num_pages_mr = 0;
- u32 num_pages_4k = 0; /* 4k portion "pages" */
-
- EDEB_EN(7, "shca=%p e_pd=%p e_maxmr=%p", shca, e_pd, e_maxmr);
-
- if (ehca_adr_bad(shca) || ehca_adr_bad(e_pd) ||
ehca_adr_bad(e_maxmr)) {
- EDEB_ERR(4, "bad input values: shca=%p e_pd=%p
e_maxmr=%p",
- shca, e_pd, e_maxmr);
- ret = -EINVAL;
- goto ehca_reg_internal_maxmr_exit0;
- }
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
e_mr = ehca_mr_new();
if (!e_mr) {
- EDEB_ERR(4, "out of memory");
+ ehca_err(&shca->ib_device, "out of memory");
ret = -ENOMEM;
goto ehca_reg_internal_maxmr_exit0;
}
@@ -1645,7 +1463,6 @@ int ehca_reg_internal_maxmr(
/* register internal max-MR on HCA */
size_maxmr = (u64)high_memory - PAGE_OFFSET;
- EDEB(7, "high_memory=%p PAGE_OFFSET=%lx", high_memory,
PAGE_OFFSET);
iova_start = (u64*)KERNELBASE;
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
@@ -1664,8 +1481,8 @@ int ehca_reg_internal_maxmr(
&pginfo, &e_mr->ib.ib_mr.lkey,
&e_mr->ib.ib_mr.rkey);
if (ret) {
- EDEB_ERR(4, "reg of internal max MR failed, e_mr=%p "
- "iova_start=%p size_maxmr=%lx num_pages_mr=%x "
+ ehca_err(&shca->ib_device, "reg of internal max MR failed,
"
+ "e_mr=%p iova_start=%p size_maxmr=%lx
num_pages_mr=%x "
"num_pages_4k=%x", e_mr, iova_start, size_maxmr,
num_pages_mr, num_pages_4k);
goto ehca_reg_internal_maxmr_exit1;
@@ -1678,18 +1495,14 @@ int ehca_reg_internal_maxmr(
atomic_inc(&(e_pd->ib_pd.usecnt));
atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
*e_maxmr = e_mr;
- goto ehca_reg_internal_maxmr_exit0;
+ return 0;
ehca_reg_internal_maxmr_exit1:
ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
- ret, shca, e_pd, e_maxmr);
- else
- EDEB_EX(7, "*e_maxmr=%p lkey=%x rkey=%x",
- *e_maxmr, (*e_maxmr)->ib.ib_mr.lkey,
- (*e_maxmr)->ib.ib_mr.rkey);
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p
e_maxmr=%p",
+ ret, shca, e_pd, e_maxmr);
return ret;
} /* end ehca_reg_internal_maxmr() */
@@ -1703,15 +1516,11 @@ int ehca_reg_maxmr(struct ehca_shca *shc
u32 *lkey,
u32 *rkey)
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
+ u64 h_ret;
struct ehca_mr *e_origmr = shca->maxmr;
- u32 hipz_acl = 0;
+ u32 hipz_acl;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
- EDEB_EN(7,"shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x
e_pd=%p",
- shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
@@ -1719,13 +1528,12 @@ int ehca_reg_maxmr(struct ehca_shca *shc
(u64)iova_start, hipz_acl,
e_pd->fw_pd,
&hipzout);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_reg_smr failed, h_ret=%lx e_origmr=%p "
- "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx
"
+ "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
h_ret, e_origmr, shca->ipz_hca_handle.handle,
e_origmr->ipz_mr_handle.handle,
e_origmr->ib.ib_mr.lkey);
- ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
- goto ehca_reg_maxmr_exit0;
+ return ehca_mrmw_map_hrc_reg_smr(h_ret);
}
/* successful registration */
e_newmr->num_pages = e_origmr->num_pages;
@@ -1736,24 +1544,19 @@ int ehca_reg_maxmr(struct ehca_shca *shc
e_newmr->ipz_mr_handle = hipzout.handle;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
-
-ehca_reg_maxmr_exit0:
- EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
- return ret;
+ return 0;
} /* end ehca_reg_maxmr() */
/*----------------------------------------------------------------------*/
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
- int ret = 0;
- struct ehca_mr *e_maxmr = NULL;
- struct ib_pd *ib_pd = NULL;
-
- EDEB_EN(7, "shca=%p shca->maxmr=%p", shca, shca->maxmr);
+ int ret;
+ struct ehca_mr *e_maxmr;
+ struct ib_pd *ib_pd;
if (!shca->maxmr) {
- EDEB_ERR(4, "bad call, shca=%p", shca);
+ ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
ret = -EINVAL;
goto ehca_dereg_internal_maxmr_exit0;
}
@@ -1764,7 +1567,7 @@ int ehca_dereg_internal_maxmr(struct ehc
ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
if (ret) {
- EDEB_ERR(3, "dereg internal max-MR failed, "
+ ehca_err(&shca->ib_device, "dereg internal max-MR failed,
"
"ret=%x e_maxmr=%p shca=%p lkey=%x",
ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
shca->maxmr = e_maxmr;
@@ -1775,10 +1578,8 @@ int ehca_dereg_internal_maxmr(struct ehc
ehca_dereg_internal_maxmr_exit0:
if (ret)
- EDEB_EX(4, "ret=%x shca=%p shca->maxmr=%p",
- ret, shca, shca->maxmr);
- else
- EDEB_EX(7, "");
+ ehca_err(&shca->ib_device, "ret=%x shca=%p
shca->maxmr=%p",
+ ret, shca, shca->maxmr);
return ret;
} /* end ehca_dereg_internal_maxmr() */
@@ -1798,34 +1599,35 @@ int ehca_mr_chk_buf_and_calc_size(struct
u32 i;
if (num_phys_buf == 0) {
- EDEB_ERR(4, "bad phys buf array len, num_phys_buf=0");
+ ehca_gen_err("bad phys buf array len, num_phys_buf=0");
return -EINVAL;
}
/* check first buffer */
if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
- EDEB_ERR(4, "iova_start/addr mismatch, iova_start=%p "
- "pbuf->addr=%lx pbuf->size=%lx",
- iova_start, pbuf->addr, pbuf->size);
+ ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
+ "pbuf->addr=%lx pbuf->size=%lx",
+ iova_start, pbuf->addr, pbuf->size);
return -EINVAL;
}
if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
(num_phys_buf > 1)) {
- EDEB_ERR(4, "addr/size mismatch in 1st buf, pbuf->addr=%lx
"
- "pbuf->size=%lx", pbuf->addr, pbuf->size);
+ ehca_gen_err("addr/size mismatch in 1st buf,
pbuf->addr=%lx "
+ "pbuf->size=%lx", pbuf->addr, pbuf->size);
return -EINVAL;
}
for (i = 0; i < num_phys_buf; i++) {
if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
- EDEB_ERR(4, "bad address, i=%x pbuf->addr=%lx "
- "pbuf->size=%lx", i, pbuf->addr,
pbuf->size);
+ ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
+ "pbuf->size=%lx",
+ i, pbuf->addr, pbuf->size);
return -EINVAL;
}
if (((i > 0) && /* not 1st */
(i < (num_phys_buf - 1)) && /* not last */
(pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
- EDEB_ERR(4, "bad size, i=%x pbuf->size=%lx",
- i, pbuf->size);
+ ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+ i, pbuf->size);
return -EINVAL;
}
size_count += pbuf->size;
@@ -1844,17 +1646,12 @@ int ehca_fmr_check_page_list(struct ehca
int list_len)
{
u32 i;
- u64 *page = NULL;
-
- if (ehca_adr_bad(page_list)) {
- EDEB_ERR(4, "bad page_list, page_list=%p fmr=%p",
- page_list, e_fmr);
- return -EINVAL;
- }
+ u64 *page;
if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
- EDEB_ERR(4, "bad list_len, list_len=%x
e_fmr->fmr_max_pages=%x "
- "fmr=%p", list_len, e_fmr->fmr_max_pages, e_fmr);
+ ehca_gen_err("bad list_len, list_len=%x "
+ "e_fmr->fmr_max_pages=%x fmr=%p",
+ list_len, e_fmr->fmr_max_pages, e_fmr);
return -EINVAL;
}
@@ -1862,9 +1659,9 @@ int ehca_fmr_check_page_list(struct ehca
page = page_list;
for (i = 0; i < list_len; i++) {
if (*page % e_fmr->fmr_page_size) {
- EDEB_ERR(4, "bad page, i=%x *page=%lx page=%p "
- "fmr=%p fmr_page_size=%x",
- i, *page, page, e_fmr,
e_fmr->fmr_page_size);
+ ehca_gen_err("bad page, i=%x *page=%lx page=%p
fmr=%p "
+ "fmr_page_size=%x", i, *page, page,
e_fmr,
+ e_fmr->fmr_page_size);
return -EINVAL;
}
page++;
@@ -1882,24 +1679,14 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
u64 *kpage)
{
int ret = 0;
- struct ib_umem_chunk *prev_chunk = NULL;
- struct ib_umem_chunk *chunk = NULL;
- struct ib_phys_buf *pbuf = NULL;
- u64 *fmrlist = NULL;
- u64 num4k = 0;
- u64 pgaddr = 0;
- u64 offs4k = 0;
+ struct ib_umem_chunk *prev_chunk;
+ struct ib_umem_chunk *chunk;
+ struct ib_phys_buf *pbuf;
+ u64 *fmrlist;
+ u64 num4k, pgaddr, offs4k;
u32 i = 0;
u32 j = 0;
- EDEB_EN(7, "pginfo=%p type=%x num_pages=%lx num_4k=%lx
next_buf=%lx "
- "next_4k=%lx number=%x kpage=%p page_cnt=%lx
page_4k_cnt=%lx "
- "next_listelem=%lx region=%p next_chunk=%p next_nmap=%lx",
- pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
- pginfo->next_buf, pginfo->next_4k, number, kpage,
- pginfo->page_cnt, pginfo->page_4k_cnt,
pginfo->next_listelem,
- pginfo->region, pginfo->next_chunk, pginfo->next_nmap);
-
if (pginfo->type == EHCA_MR_PGI_PHYS) {
/* loop over desired phys_buf_array entries */
while (i < number) {
@@ -1911,23 +1698,27 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
/* sanity check */
if ((pginfo->page_cnt >=
pginfo->num_pages) ||
(pginfo->page_4k_cnt >=
pginfo->num_4k)) {
- EDEB_ERR(4, "page_cnt >=
num_pages, "
- "page_cnt=%lx
num_pages=%lx "
- "page_4k_cnt=%lx
num_4k=%lx "
- "i=%x", pginfo->page_cnt,
- pginfo->num_pages,
- pginfo->page_4k_cnt,
- pginfo->num_4k, i);
+ ehca_gen_err("page_cnt >=
num_pages, "
+ "page_cnt=%lx "
+ "num_pages=%lx "
+ "page_4k_cnt=%lx "
+ "num_4k=%lx i=%x",
+ pginfo->page_cnt,
+ pginfo->num_pages,
+ pginfo->page_4k_cnt,
+ pginfo->num_4k, i);
ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
}
*kpage = phys_to_abs(
(pbuf->addr & EHCA_PAGEMASK)
+ (pginfo->next_4k *
EHCA_PAGESIZE));
if ( !(*kpage) && pbuf->addr ) {
- EDEB_ERR(4, "pbuf->addr=%lx "
- "pbuf->size=%lx
next_4k=%lx",
- pbuf->addr, pbuf->size,
- pginfo->next_4k);
+ ehca_gen_err("pbuf->addr=%lx "
+ "pbuf->size=%lx "
+ "next_4k=%lx",
pbuf->addr,
+ pbuf->size,
+ pginfo->next_4k);
ret = -EFAULT;
goto ehca_set_pagebuf_exit0;
}
@@ -1952,23 +1743,21 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
list_for_each_entry_continue(chunk,
(&(pginfo->region->chunk_list)),
list) {
- EDEB(9, "chunk->page_list[0]=%lx",
- (u64)sg_dma_address(&chunk->page_list[0]));
for (i = pginfo->next_nmap; i < chunk->nmap; ) {
pgaddr = (
page_to_pfn(chunk->page_list[i].page)
<< PAGE_SHIFT );
*kpage = phys_to_abs(pgaddr +
(pginfo->next_4k *
EHCA_PAGESIZE));
- EDEB(9,"pgaddr=%lx *kpage=%lx
next_4k=%lx",
- pgaddr, *kpage, pginfo->next_4k);
if ( !(*kpage) ) {
- EDEB_ERR(4, "pgaddr=%lx "
- "chunk->page_list[i]=%lx
i=%x "
- "next_4k=%lx mr=%p",
pgaddr,
- (u64)sg_dma_address(
- &chunk->page_list[i]),
- i, pginfo->next_4k,
e_mr);
+ ehca_gen_err("pgaddr=%lx "
+ "chunk->page_list[i]=%lx "
+ "i=%x next_4k=%lx
mr=%p",
+ pgaddr,
+ (u64)sg_dma_address(
+ &chunk->
+ page_list[i]),
+ i, pginfo->next_4k,
e_mr);
ret = -EFAULT;
goto ehca_set_pagebuf_exit0;
}
@@ -2009,10 +1798,11 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
pginfo->next_4k *
EHCA_PAGESIZE);
if ( !(*kpage) ) {
- EDEB_ERR(4, "*fmrlist=%lx fmrlist=%p "
- "next_listelem=%lx next_4k=%lx",
- *fmrlist, fmrlist,
- pginfo->next_listelem,pginfo->next_4k);
+ ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+ "next_listelem=%lx
next_4k=%lx",
+ *fmrlist, fmrlist,
+ pginfo->next_listelem,
+ pginfo->next_4k);
ret = -EFAULT;
goto ehca_set_pagebuf_exit0;
}
@@ -2028,32 +1818,23 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
}
}
} else {
- EDEB_ERR(4, "bad pginfo->type=%x", pginfo->type);
+ ehca_gen_err("bad pginfo->type=%x", pginfo->type);
ret = -EFAULT;
goto ehca_set_pagebuf_exit0;
}
ehca_set_pagebuf_exit0:
if (ret)
- EDEB_EX(4, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx
"
- "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
- "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
- "next_listelem=%lx region=%p next_chunk=%p "
- "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
- pginfo->num_pages, pginfo->num_4k,
pginfo->next_buf,
- pginfo->next_4k, number, kpage, pginfo->page_cnt,
- pginfo->page_4k_cnt, i, pginfo->next_listelem,
- pginfo->region, pginfo->next_chunk,
pginfo->next_nmap);
- else
- EDEB_EX(7, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx
"
- "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
- "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
- "next_listelem=%lx region=%p next_chunk=%p "
- "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
- pginfo->num_pages, pginfo->num_4k,
pginfo->next_buf,
- pginfo->next_4k, number, kpage, pginfo->page_cnt,
- pginfo->page_4k_cnt, i, pginfo->next_listelem,
- pginfo->region, pginfo->next_chunk,
pginfo->next_nmap);
+ ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x
num_pages=%lx "
+ "num_4k=%lx next_buf=%lx next_4k=%lx
number=%x "
+ "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
+ "next_listelem=%lx region=%p next_chunk=%p "
+ "next_nmap=%lx", ret, e_mr, pginfo,
pginfo->type,
+ pginfo->num_pages, pginfo->num_4k,
+ pginfo->next_buf, pginfo->next_4k, number,
kpage,
+ pginfo->page_cnt, pginfo->page_4k_cnt, i,
+ pginfo->next_listelem, pginfo->region,
+ pginfo->next_chunk, pginfo->next_nmap);
return ret;
} /* end ehca_set_pagebuf() */
@@ -2065,30 +1846,20 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
u64 *rpage)
{
int ret = 0;
- struct ib_phys_buf *tmp_pbuf = NULL;
- u64 *fmrlist = NULL;
- struct ib_umem_chunk *chunk = NULL;
- struct ib_umem_chunk *prev_chunk = NULL;
- u64 pgaddr = 0;
- u64 num4k = 0;
- u64 offs4k = 0;
-
- EDEB_EN(7, "pginfo=%p type=%x num_pages=%lx num_4k=%lx
next_buf=%lx "
- "next_4k=%lx rpage=%p page_cnt=%lx page_4k_cnt=%lx "
- "next_listelem=%lx region=%p next_chunk=%p next_nmap=%lx",
- pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
- pginfo->next_buf, pginfo->next_4k, rpage,
pginfo->page_cnt,
- pginfo->page_4k_cnt, pginfo->next_listelem,
pginfo->region,
- pginfo->next_chunk, pginfo->next_nmap);
+ struct ib_phys_buf *tmp_pbuf;
+ u64 *fmrlist;
+ struct ib_umem_chunk *chunk;
+ struct ib_umem_chunk *prev_chunk;
+ u64 pgaddr, num4k, offs4k;
if (pginfo->type == EHCA_MR_PGI_PHYS) {
/* sanity check */
if ((pginfo->page_cnt >= pginfo->num_pages) ||
(pginfo->page_4k_cnt >= pginfo->num_4k)) {
- EDEB_ERR(4, "page_cnt >= num_pages, page_cnt=%lx "
- "num_pages=%lx page_4k_cnt=%lx
num_4k=%lx",
- pginfo->page_cnt, pginfo->num_pages,
- pginfo->page_4k_cnt, pginfo->num_4k);
+ ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx
"
+ "num_pages=%lx page_4k_cnt=%lx
num_4k=%lx",
+ pginfo->page_cnt, pginfo->num_pages,
+ pginfo->page_4k_cnt, pginfo->num_4k);
ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0;
}
@@ -2099,10 +1870,10 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
(pginfo->next_4k * EHCA_PAGESIZE));
if ( !(*rpage) && tmp_pbuf->addr ) {
- EDEB_ERR(4, "tmp_pbuf->addr=%lx"
- " tmp_pbuf->size=%lx next_4k=%lx",
- tmp_pbuf->addr, tmp_pbuf->size,
- pginfo->next_4k);
+ ehca_gen_err("tmp_pbuf->addr=%lx"
+ " tmp_pbuf->size=%lx next_4k=%lx",
+ tmp_pbuf->addr, tmp_pbuf->size,
+ pginfo->next_4k);
ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0;
}
@@ -2125,16 +1896,15 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
<< PAGE_SHIFT);
*rpage = phys_to_abs(pgaddr +
(pginfo->next_4k *
EHCA_PAGESIZE));
- EDEB(9,"pgaddr=%lx *rpage=%lx next_4k=%lx",
pgaddr,
- *rpage, pginfo->next_4k);
if ( !(*rpage) ) {
- EDEB_ERR(4, "pgaddr=%lx
chunk->page_list[]=%lx "
- "next_nmap=%lx next_4k=%lx
mr=%p",
- pgaddr, (u64)sg_dma_address(
- &chunk->page_list[
- pginfo->next_nmap]),
- pginfo->next_nmap,
pginfo->next_4k,
- e_mr);
+ ehca_gen_err("pgaddr=%lx
chunk->page_list[]=%lx"
+ " next_nmap=%lx next_4k=%lx
mr=%p",
+ pgaddr, (u64)sg_dma_address(
+ &chunk->page_list[
+ pginfo->
+ next_nmap]),
+ pginfo->next_nmap,
pginfo->next_4k,
+ e_mr);
ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0;
}
@@ -2161,9 +1931,10 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
pginfo->next_4k * EHCA_PAGESIZE);
if ( !(*rpage) ) {
- EDEB_ERR(4, "*fmrlist=%lx fmrlist=%p
next_listelem=%lx "
- "next_4k=%lx", *fmrlist, fmrlist,
- pginfo->next_listelem, pginfo->next_4k);
+ ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+ "next_listelem=%lx next_4k=%lx",
+ *fmrlist, fmrlist,
pginfo->next_listelem,
+ pginfo->next_4k);
ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0;
}
@@ -2176,32 +1947,22 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
pginfo->next_4k = 0;
}
} else {
- EDEB_ERR(4, "bad pginfo->type=%x", pginfo->type);
+ ehca_gen_err("bad pginfo->type=%x", pginfo->type);
ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0;
}
ehca_set_pagebuf_1_exit0:
if (ret)
- EDEB_EX(4, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx
"
- "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
- "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
- "region=%p next_chunk=%p next_nmap=%lx", ret,
e_mr,
- pginfo, pginfo->type, pginfo->num_pages,
pginfo->num_4k,
- pginfo->next_buf, pginfo->next_4k, rpage,
- pginfo->page_cnt, pginfo->page_4k_cnt,
- pginfo->next_listelem, pginfo->region,
- pginfo->next_chunk, pginfo->next_nmap);
- else
- EDEB_EX(7, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx
"
- "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
- "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
- "region=%p next_chunk=%p next_nmap=%lx", ret,
e_mr,
- pginfo, pginfo->type, pginfo->num_pages,
pginfo->num_4k,
- pginfo->next_buf, pginfo->next_4k, rpage,
- pginfo->page_cnt, pginfo->page_4k_cnt,
- pginfo->next_listelem, pginfo->region,
- pginfo->next_chunk, pginfo->next_nmap);
+ ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x
num_pages=%lx "
+ "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p
"
+ "page_cnt=%lx page_4k_cnt=%lx
next_listelem=%lx "
+ "region=%p next_chunk=%p next_nmap=%lx", ret,
e_mr,
+ pginfo, pginfo->type, pginfo->num_pages,
+ pginfo->num_4k, pginfo->next_buf,
pginfo->next_4k,
+ rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
+ pginfo->next_listelem, pginfo->region,
+ pginfo->next_chunk, pginfo->next_nmap);
return ret;
} /* end ehca_set_pagebuf_1() */
@@ -2217,7 +1978,7 @@ int ehca_mr_is_maxmr(u64 size,
/* a MR is treated as max-MR only if it fits following: */
if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
(iova_start == (void*)KERNELBASE)) {
- EDEB(6, "this is a max-MR");
+ ehca_gen_dbg("this is a max-MR");
return 1;
} else
return 0;
@@ -2470,3 +2231,31 @@ void ehca_mr_deletenew(struct ehca_mr *m
mr->nr_of_pages = 0;
mr->pagearray = NULL;
} /* end ehca_mr_deletenew() */
+
+int ehca_init_mrmw_cache(void)
+{
+ mr_cache = kmem_cache_create("ehca_cache_mr",
+ sizeof(struct ehca_mr), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!mr_cache)
+ return -ENOMEM;
+ mw_cache = kmem_cache_create("ehca_cache_mw",
+ sizeof(struct ehca_mw), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!mw_cache) {
+ kmem_cache_destroy(mr_cache);
+ mr_cache = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ehca_cleanup_mrmw_cache(void)
+{
+ if (mr_cache)
+ kmem_cache_destroy(mr_cache);
+ if (mw_cache)
+ kmem_cache_destroy(mw_cache);
+}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mrmw.h
linux-2.6/drivers/infiniband/hw/ehca/ehca_mrmw.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_mrmw.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_mrmw.h 2006-08-30
20:00:16.000000000 +0200
@@ -42,9 +42,6 @@
#ifndef _EHCA_MRMW_H_
#define _EHCA_MRMW_H_
-#undef DEB_PREFIX
-#define DEB_PREFIX "mrmw"
-
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_pd.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_pd.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_pd.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_pd.c 2006-08-30
20:00:16.000000000 +0200
@@ -38,29 +38,22 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-
-#define DEB_PREFIX "vpd "
-
#include <asm/current.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
+static struct kmem_cache *pd_cache;
+
struct ib_pd *ehca_alloc_pd(struct ib_device *device,
struct ib_ucontext *context, struct ib_udata
*udata)
{
- extern struct ehca_module ehca_module;
- struct ib_pd *mypd = NULL;
- struct ehca_pd *pd = NULL;
-
- EDEB_EN(7, "device=%p context=%p udata=%p", device, context,
udata);
+ struct ehca_pd *pd;
- EHCA_CHECK_DEVICE_P(device);
-
- pd = kmem_cache_alloc(ehca_module.cache_pd, SLAB_KERNEL);
+ pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
if (!pd) {
- EDEB_ERR(4, "ERROR device=%p context=%p pd=%p"
- " out of memory", device, context, mypd);
+ ehca_err(device, "device=%p context=%p out of memory",
+ device, context);
return ERR_PTR(-ENOMEM);
}
@@ -82,39 +75,40 @@ struct ib_pd *ehca_alloc_pd(struct ib_de
} else
pd->fw_pd.value = (u64)pd;
- mypd = &pd->ib_pd;
-
- EHCA_REGISTER_PD(device, pd);
-
- EDEB_EX(7, "device=%p context=%p pd=%p", device, context, mypd);
-
- return mypd;
+ return &pd->ib_pd;
}
int ehca_dealloc_pd(struct ib_pd *pd)
{
- extern struct ehca_module ehca_module;
- int ret = 0;
u32 cur_pid = current->tgid;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
- EDEB_EN(7, "pd=%p", pd);
-
- EHCA_CHECK_PD(pd);
- my_pd = container_of(pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
- EHCA_DEREGISTER_PD(pd);
-
- kmem_cache_free(ehca_module.cache_pd,
+ kmem_cache_free(pd_cache,
container_of(pd, struct ehca_pd, ib_pd));
- EDEB_EX(7, "pd=%p", pd);
+ return 0;
+}
- return ret;
+int ehca_init_pd_cache(void)
+{
+ pd_cache = kmem_cache_create("ehca_cache_pd",
+ sizeof(struct ehca_pd), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!pd_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_pd_cache(void)
+{
+ if (pd_cache)
+ kmem_cache_destroy(pd_cache);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_qp.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_qp.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_qp.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_qp.c 2006-08-30
20:00:16.000000000 +0200
@@ -42,8 +42,6 @@
*/
-#define DEB_PREFIX "e_qp"
-
#include <asm/current.h>
#include "ehca_classes.h"
@@ -53,6 +51,8 @@
#include "hcp_if.h"
#include "hipz_fns.h"
+static struct kmem_cache *qp_cache;
+
/*
* attributes not supported by query qp
*/
@@ -114,7 +114,7 @@ static inline enum ehca_qp_state ib2ehca
case IB_QPS_ERR:
return EHCA_QPS_ERR;
default:
- EDEB_ERR(4, "invalid ib_qp_state=%x", ib_qp_state);
+ ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
return -EINVAL;
}
}
@@ -142,7 +142,7 @@ static inline enum ib_qp_state ehca2ib_q
case EHCA_QPS_ERR:
return IB_QPS_ERR;
default:
- EDEB_ERR(4,"invalid ehca_qp_state=%x",ehca_qp_state);
+ ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
return -EINVAL;
}
}
@@ -176,7 +176,7 @@ static inline enum ehca_qp_type ib2ehcaq
case IB_QPT_UD:
return QPT_UD;
default:
- EDEB_ERR(4,"Invalid ibqptype=%x", ibqptype);
+ ehca_gen_err("Invalid ibqptype=%x", ibqptype);
return -EINVAL;
}
}
@@ -190,24 +190,34 @@ static inline enum ib_qp_statetrans get_
index = IB_QPST_ANY2RESET;
break;
case IB_QPS_INIT:
- if (ib_fromstate == IB_QPS_RESET)
+ switch (ib_fromstate) {
+ case IB_QPS_RESET:
index = IB_QPST_RESET2INIT;
- else if (ib_fromstate == IB_QPS_INIT)
+ break;
+ case IB_QPS_INIT:
index = IB_QPST_INIT2INIT;
+ break;
+ }
break;
case IB_QPS_RTR:
if (ib_fromstate == IB_QPS_INIT)
index = IB_QPST_INIT2RTR;
break;
case IB_QPS_RTS:
- if (ib_fromstate == IB_QPS_RTR)
+ switch (ib_fromstate) {
+ case IB_QPS_RTR:
index = IB_QPST_RTR2RTS;
- else if (ib_fromstate == IB_QPS_RTS)
+ break;
+ case IB_QPS_RTS:
index = IB_QPST_RTS2RTS;
- else if (ib_fromstate == IB_QPS_SQD)
+ break;
+ case IB_QPS_SQD:
index = IB_QPST_SQD2RTS;
- else if (ib_fromstate == IB_QPS_SQE)
+ break;
+ case IB_QPS_SQE:
index = IB_QPST_SQE2RTS;
+ break;
+ }
break;
case IB_QPS_SQD:
if (ib_fromstate == IB_QPS_RTS)
@@ -252,7 +262,7 @@ static inline int ibqptype2servicetype(e
case IB_QPT_RAW_ETY:
return -EINVAL;
default:
- EDEB_ERR(4, "Invalid ibqptype=%x", ibqptype);
+ ehca_gen_err("Invalid ibqptype=%x", ibqptype);
return -EINVAL;
}
}
@@ -260,7 +270,7 @@ static inline int ibqptype2servicetype(e
/*
* init_qp_queues initializes/constructs r/squeue and registers queue
pages.
*/
-static inline int init_qp_queues(struct ipz_adapter_handle
ipz_hca_handle,
+static inline int init_qp_queues(struct ehca_shca *shca,
struct ehca_qp *my_qp,
int nr_sq_pages,
int nr_rq_pages,
@@ -268,28 +278,26 @@ static inline int init_qp_queues(struct
int rwqe_size,
int nr_send_sges, int nr_receive_sges)
{
- int ret = -EINVAL;
- int cnt = 0;
- void *vpage = NULL;
- u64 rpage = 0;
- int ipz_rc = -1;
- u64 h_ret = H_PARAMETER;
+ int ret, cnt, ipz_rc;
+ void *vpage;
+ u64 rpage, h_ret;
+ struct ib_device *ib_dev = &shca->ib_device;
+ struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
nr_sq_pages,
EHCA_PAGESIZE, swqe_size, nr_send_sges);
if (!ipz_rc) {
- EDEB_ERR(4, "Cannot allocate page for squeue. ipz_rc=%x",
+ ehca_err(ib_dev,"Cannot allocate page for squeue.
ipz_rc=%x",
ipz_rc);
- ret = -EBUSY;
- return ret;
+ return -EBUSY;
}
ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
nr_rq_pages,
EHCA_PAGESIZE, rwqe_size,
nr_receive_sges);
if (!ipz_rc) {
- EDEB_ERR(4, "Cannot allocate page for rqueue. ipz_rc=%x",
+ ehca_err(ib_dev, "Cannot allocate page for rqueue.
ipz_rc=%x",
ipz_rc);
ret = -EBUSY;
goto init_qp_queues0;
@@ -298,7 +306,7 @@ static inline int init_qp_queues(struct
for (cnt = 0; cnt < nr_sq_pages; cnt++) {
vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
if (!vpage) {
- EDEB_ERR(4, "SQ ipz_qpageit_get_inc() "
+ ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
"failed p_vpage= %p", vpage);
ret = -EINVAL;
goto init_qp_queues1;
@@ -311,8 +319,8 @@ static inline int init_qp_queues(struct
rpage, 1,
my_qp->galpas.kernel);
if (h_ret < H_SUCCESS) {
- EDEB_ERR(4,"SQ hipz_qp_register_rpage() faield "
- "rc=%lx", h_ret);
+ ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
+ " failed rc=%lx", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queues1;
}
@@ -324,9 +332,8 @@ static inline int init_qp_queues(struct
for (cnt = 0; cnt < nr_rq_pages; cnt++) {
vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
if (!vpage) {
- EDEB_ERR(4,"RQ ipz_qpageit_get_inc() "
+ ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
"failed p_vpage = %p", vpage);
- h_ret = H_RESOURCE;
ret = -EINVAL;
goto init_qp_queues1;
}
@@ -338,29 +345,28 @@ static inline int init_qp_queues(struct
&my_qp->pf, 0, 1,
rpage,
1,my_qp->galpas.kernel);
if (h_ret < H_SUCCESS) {
- EDEB_ERR(4, "RQ hipz_qp_register_rpage() failed "
+ ehca_err(ib_dev, "RQ hipz_qp_register_rpage()
failed "
"rc=%lx", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queues1;
}
if (cnt == (nr_rq_pages - 1)) { /* last page! */
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4,"RQ hipz_qp_register_rpage() "
+ ehca_err(ib_dev, "RQ
hipz_qp_register_rpage() "
"h_ret= %lx ", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queues1;
}
vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
if (vpage) {
- EDEB_ERR(4,"ipz_qpageit_get_inc() "
- "should not succeed vpage=%p",
- vpage);
+ ehca_err(ib_dev, "ipz_qpageit_get_inc() "
+ "should not succeed vpage=%p",
vpage);
ret = -EINVAL;
goto init_qp_queues1;
}
} else {
if (h_ret != H_PAGE_REGISTERED) {
- EDEB_ERR(4,"RQ hipz_qp_register_rpage() "
+ ehca_err(ib_dev, "RQ
hipz_qp_register_rpage() "
"h_ret= %lx ", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queues1;
@@ -379,37 +385,30 @@ init_qp_queues0:
return ret;
}
-
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- extern struct ehca_module ehca_module;
- static int da_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
- int ret = -EINVAL;
-
- struct ehca_qp *my_qp = NULL;
- struct ehca_pd *my_pd = NULL;
- struct ehca_shca *shca = NULL;
+ static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
+ static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
+ struct ehca_qp *my_qp;
+ struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca = container_of(pd->device, struct
ehca_shca,
+ ib_device);
struct ib_ucontext *context = NULL;
- u64 h_ret = H_PARAMETER;
- int max_send_sge;
- int max_recv_sge;
+ u64 h_ret;
+ int max_send_sge, max_recv_sge, ret;
/* h_call's out parameters */
struct ehca_alloc_qp_parms parms;
- u32 qp_nr = 0, swqe_size = 0, rwqe_size = 0;
+ u32 swqe_size = 0, rwqe_size = 0;
u8 daqp_completion, isdaqp;
unsigned long flags;
- EDEB_EN(7,"pd=%p init_attr=%p", pd, init_attr);
- EHCA_CHECK_PD_P(pd);
- EHCA_CHECK_ADR_P(init_attr);
-
if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
- EDEB_ERR(4, "init_attr->sg_sig_type=%x not allowed",
- init_attr->sq_sig_type);
+ ehca_err(pd->device, "init_attr->sg_sig_type=%x not
allowed",
+ init_attr->sq_sig_type);
return ERR_PTR(-EINVAL);
}
@@ -424,20 +423,36 @@ struct ib_qp *ehca_create_qp(struct ib_p
init_attr->qp_type != IB_QPT_GSI &&
init_attr->qp_type != IB_QPT_UC &&
init_attr->qp_type != IB_QPT_RC) {
- EDEB_ERR(4,"wrong QP Type=%x",init_attr->qp_type);
+ ehca_err(pd->device, "wrong QP Type=%x",
init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
- if (init_attr->qp_type != IB_QPT_RC && isdaqp != 0) {
- EDEB_ERR(4,"unsupported LL QP
Type=%x",init_attr->qp_type);
+ if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type !=
IB_QPT_UD)
+ && isdaqp) {
+ ehca_err(pd->device, "unsupported LL QP Type=%x",
+ init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
+ (init_attr->cap.max_send_wr > 255 ||
+ init_attr->cap.max_recv_wr > 255 )) {
+ ehca_err(pd->device, "Invalid Number of max_sq_wr
=%x "
+ "or max_rq_wr=%x for QP Type=%x",
+ init_attr->cap.max_send_wr,
+ init_attr->cap.max_recv_wr,init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
+ init_attr->cap.max_send_wr > 255) {
+ ehca_err(pd->device,
+ "Invalid Number of max_send_wr=%x for UD
QP_TYPE=%x",
+ init_attr->cap.max_send_wr, init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
if (pd->uobject && udata)
context = pd->uobject->context;
- my_qp = kmem_cache_alloc(ehca_module.cache_qp, SLAB_KERNEL);
+ my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
if (!my_qp) {
- EDEB_ERR(4, "pd=%p not enough memory to alloc qp", pd);
+ ehca_err(pd->device, "pd=%p not enough memory to alloc
qp", pd);
return ERR_PTR(-ENOMEM);
}
@@ -446,9 +461,6 @@ struct ib_qp *ehca_create_qp(struct ib_p
spin_lock_init(&my_qp->spinlock_s);
spin_lock_init(&my_qp->spinlock_r);
- my_pd = container_of(pd, struct ehca_pd, ib_pd);
-
- shca = container_of(pd->device, struct ehca_shca, ib_device);
my_qp->recv_cq =
container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
my_qp->send_cq =
@@ -459,7 +471,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
do {
if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
ret = -ENOMEM;
- EDEB_ERR(4, "Can't reserve idr resources.");
+ ehca_err(pd->device, "Can't reserve idr
resources.");
goto create_qp_exit0;
}
@@ -471,14 +483,14 @@ struct ib_qp *ehca_create_qp(struct ib_p
if (ret) {
ret = -ENOMEM;
- EDEB_ERR(4, "Can't allocate new idr entry.");
+ ehca_err(pd->device, "Can't allocate new idr entry.");
goto create_qp_exit0;
}
parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
if (parms.servicetype < 0) {
ret = -EINVAL;
- EDEB_ERR(4, "Invalid qp_type=%x", init_attr->qp_type);
+ ehca_err(pd->device, "Invalid qp_type=%x",
init_attr->qp_type);
goto create_qp_exit0;
}
@@ -497,8 +509,6 @@ struct ib_qp *ehca_create_qp(struct ib_p
max_recv_sge += 2;
}
- EDEB(7, "isdaqp=%x daqp_completion=%x", isdaqp, daqp_completion);
-
parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
parms.daqp_ctrl = isdaqp | daqp_completion;
parms.pd = my_pd->fw_pd;
@@ -508,7 +518,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp,
&parms);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "h_alloc_resource_qp() failed h_ret=%lx",
h_ret);
+ ehca_err(pd->device, "h_alloc_resource_qp() failed
h_ret=%lx",
+ h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit1;
}
@@ -521,8 +532,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
rwqe_size = offsetof(struct ehca_wqe,
u.nud.sg_list[
(parms.act_nr_recv_sges)]);
} else { /* for daqp we need to use msg size, not wqe size
*/
- swqe_size = da_msg_size[max_send_sge];
- rwqe_size = da_msg_size[max_recv_sge];
+ swqe_size = da_rc_msg_size[max_send_sge];
+ rwqe_size = da_rc_msg_size[max_recv_sge];
parms.act_nr_send_sges = 1;
parms.act_nr_recv_sges = 1;
}
@@ -540,10 +551,17 @@ struct ib_qp *ehca_create_qp(struct ib_p
/* UD circumvention */
parms.act_nr_recv_sges -= 2;
parms.act_nr_send_sges -= 2;
- swqe_size = offsetof(struct ehca_wqe,
- u.ud_av.sg_list[parms.act_nr_send_sges]);
- rwqe_size = offsetof(struct ehca_wqe,
- u.ud_av.sg_list[parms.act_nr_recv_sges]);
+ if (isdaqp) {
+ swqe_size = da_ud_sq_msg_size[max_send_sge];
+ rwqe_size = da_rc_msg_size[max_recv_sge];
+ parms.act_nr_send_sges = 1;
+ parms.act_nr_recv_sges = 1;
+ } else {
+ swqe_size = offsetof(struct ehca_wqe,
+ u.ud_av.sg_list[parms.act_nr_send_sges]);
+ rwqe_size = offsetof(struct ehca_wqe,
+ u.ud_av.sg_list[parms.act_nr_recv_sges]);
+ }
if (IB_QPT_GSI == init_attr->qp_type ||
IB_QPT_SMI == init_attr->qp_type) {
@@ -562,13 +580,13 @@ struct ib_qp *ehca_create_qp(struct ib_p
}
/* initializes r/squeue and registers queue pages */
- ret = init_qp_queues(shca->ipz_hca_handle, my_qp,
+ ret = init_qp_queues(shca, my_qp,
parms.nr_sq_pages, parms.nr_rq_pages,
swqe_size, rwqe_size,
parms.act_nr_send_sges,
parms.act_nr_recv_sges);
if (ret) {
- EDEB_ERR(4,"Couldn't initialize r/squeue and pages ret=%x",
- ret);
+ ehca_err(pd->device,
+ "Couldn't initialize r/squeue and pages ret=%x", ret);
goto create_qp_exit2;
}
@@ -597,7 +615,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
if (init_attr->qp_type == IB_QPT_GSI) {
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "ehca_define_sqp() failed rc=%lx",h_ret);
+ ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
+ h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit3;
}
@@ -607,7 +626,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
struct ehca_cq, ib_cq);
ret = ehca_cq_assign_qp(cq, my_qp);
if (ret) {
- EDEB_ERR(4, "Couldn't assign qp to send_cq
ret=%x",
+ ehca_err(pd->device, "Couldn't assign qp to
send_cq ret=%x",
ret);
goto create_qp_exit3;
}
@@ -637,7 +656,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
(void**)&resp.ipz_rqueue.queue,
&vma);
if (ret) {
- EDEB_ERR(4, "Could not mmap rqueue pages");
+ ehca_err(pd->device, "Could not mmap rqueue
pages");
goto create_qp_exit3;
}
my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
@@ -652,7 +671,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
(void**)&resp.ipz_squeue.queue,
&vma);
if (ret) {
- EDEB_ERR(4, "Could not mmap squeue pages");
+ ehca_err(pd->device, "Could not mmap squeue
pages");
goto create_qp_exit4;
}
my_qp->uspace_squeue = resp.ipz_squeue.queue;
@@ -662,20 +681,18 @@ struct ib_qp *ehca_create_qp(struct ib_p
(void**)&resp.galpas.kernel.fw_handle,
&vma);
if (ret) {
- EDEB_ERR(4, "Could not mmap fw_handle");
+ ehca_err(pd->device, "Could not mmap fw_handle");
goto create_qp_exit5;
}
my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
- EDEB_ERR(4, "Copy to udata failed");
+ ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
goto create_qp_exit6;
}
}
- EDEB_EX(7, "ehca_qp=%p qp_num=%x, token=%x",
- my_qp, qp_nr, my_qp->token);
return &my_qp->ib_qp;
create_qp_exit6:
@@ -700,10 +717,8 @@ create_qp_exit1:
spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
create_qp_exit0:
- kmem_cache_free(ehca_module.cache_qp, my_qp);
- EDEB_EX(4, "failed ret=%x", ret);
+ kmem_cache_free(qp_cache, my_qp);
return ERR_PTR(ret);
-
}
/*
@@ -714,48 +729,45 @@ create_qp_exit0:
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
int *bad_wqe_cnt)
{
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- struct ipz_queue *squeue = NULL;
- void *bad_send_wqe_p = NULL;
- void *bad_send_wqe_v = NULL;
- void *squeue_start_p = NULL;
- void *squeue_end_p = NULL;
- void *squeue_start_v = NULL;
- void *squeue_end_v = NULL;
- struct ehca_wqe *wqe = NULL;
+ u64 h_ret;
+ struct ipz_queue *squeue;
+ void *bad_send_wqe_p, *bad_send_wqe_v;
+ void *squeue_start_p, *squeue_end_p;
+ void *squeue_start_v, *squeue_end_v;
+ struct ehca_wqe *wqe;
int qp_num = my_qp->ib_qp.qp_num;
- EDEB_EN(7, "ehca_qp=%p qp_num=%x ", my_qp, qp_num);
-
/* get send wqe pointer */
h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
&bad_send_wqe_p, NULL, 2);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_h_disable_and_get_wqe() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lx",my_qp, qp_num,
h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto prepare_sqe_rts_exit1;
+ ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
+ " ehca_qp=%p qp_num=%x h_ret=%lx",
+ my_qp, qp_num, h_ret);
+ return ehca2ib_return_code(h_ret);
}
bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
- EDEB(7, "qp_num=%x bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+ ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
+ qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
- EDEB_DMP(6, bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
+ if (ehca_debug_level)
+ ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue;
squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
squeue_end_p = squeue_start_p+squeue->queue_length;
squeue_start_v = abs_to_virt((u64)squeue_start_p);
squeue_end_v = abs_to_virt((u64)squeue_end_p);
- EDEB(6, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
- qp_num, squeue_start_v, squeue_end_v);
+ ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
+ qp_num, squeue_start_v, squeue_end_v);
/* loop sets wqe's purge bit */
wqe = (struct ehca_wqe*)bad_send_wqe_v;
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
- EDEB_DMP(6, wqe, 32, "qp_num=%x wqe", qp_num);
+ if (ehca_debug_level)
+ ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
@@ -768,13 +780,11 @@ static int prepare_sqe_rts(struct ehca_q
* bad wqe will be reprocessed and ignored when pol_cq() is
called,
* i.e. nr of wqes with flush error status is one less
*/
- EDEB(6, "qp_num=%x flusherr_wqe_cnt=%x", qp_num,
(*bad_wqe_cnt)-1);
+ ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
+ qp_num, (*bad_wqe_cnt)-1);
wqe->wqef = 0;
-prepare_sqe_rts_exit1:
-
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x", my_qp, qp_num, ret);
- return ret;
+ return 0;
}
/*
@@ -787,34 +797,25 @@ static int internal_modify_qp(struct ib_
struct ib_qp_attr *attr,
int attr_mask, int smi_reset2init)
{
- enum ib_qp_state qp_cur_state = 0, qp_new_state = 0;
- int cnt = 0, qp_attr_idx = 0, ret = 0;
-
+ enum ib_qp_state qp_cur_state, qp_new_state;
+ int cnt, qp_attr_idx, ret = 0;
enum ib_qp_statetrans statetrans;
- struct hcp_modify_qp_control_block *mqpcb = NULL;
- struct ehca_qp *my_qp = NULL;
- struct ehca_shca *shca = NULL;
- u64 update_mask = 0;
- u64 h_ret = H_SUCCESS;
+ struct hcp_modify_qp_control_block *mqpcb;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca =
+ container_of(ibqp->pd->device, struct ehca_shca,
ib_device);
+ u64 update_mask;
+ u64 h_ret;
int bad_wqe_cnt = 0;
int squeue_locked = 0;
unsigned long spl_flags = 0;
- my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- shca = container_of(ibqp->pd->device, struct ehca_shca,
ib_device);
-
- EDEB_EN(7, "ehca_qp=%p qp_num=%x ibqp_type=%x "
- "new qp_state=%x attribute_mask=%x",
- my_qp, ibqp->qp_num, ibqp->qp_type,
- attr->qp_state, attr_mask);
-
/* do query_qp to obtain current attr values */
mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
if (mqpcb == NULL) {
- ret = -ENOMEM;
- EDEB_ERR(4, "Could not get zeroed page for mqpcb "
+ ehca_err(ibqp->device, "Could not get zeroed page for
mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
- goto modify_qp_exit0;
+ return -ENOMEM;
}
h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
@@ -822,20 +823,18 @@ static int internal_modify_qp(struct ib_
&my_qp->pf,
mqpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_h_query_qp() failed "
+ ehca_err(ibqp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lx",
my_qp, ibqp->qp_num, h_ret);
ret = ehca2ib_return_code(h_ret);
goto modify_qp_exit1;
}
- EDEB(7, "ehca_qp=%p qp_num=%x ehca_qp_state=%x",
- my_qp, ibqp->qp_num, mqpcb->qp_state);
qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
if (qp_cur_state == -EINVAL) { /* invalid qp state */
ret = -EINVAL;
- EDEB_ERR(4, "Invalid current ehca_qp_state=%x "
+ ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
mqpcb->qp_state, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
@@ -860,37 +859,38 @@ static int internal_modify_qp(struct ib_
int smirc = internal_modify_qp(
ibqp, &smiqp_attr, smiqp_attr_mask, 1);
if (smirc) {
- EDEB_ERR(4, "SMI RESET -> INIT failed. "
+ ehca_err(ibqp->device, "SMI RESET -> INIT failed.
"
"ehca_modify_qp() rc=%x", smirc);
ret = H_PARAMETER;
goto modify_qp_exit1;
}
qp_cur_state = IB_QPS_INIT;
- EDEB(7, "SMI RESET -> INIT succeeded");
+ ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
}
/* is transmitted current state equal to "real" current state */
if ((attr_mask & IB_QP_CUR_STATE) &&
qp_cur_state != attr->cur_qp_state) {
ret = -EINVAL;
- EDEB_ERR(4, "Invalid IB_QP_CUR_STATE
attr->curr_qp_state=%x <>"
+ ehca_err(ibqp->device,
+ "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x
<>"
" actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
attr->cur_qp_state, qp_cur_state, my_qp,
ibqp->qp_num);
goto modify_qp_exit1;
}
- EDEB(7, "ehca_qp=%p qp_num=%x current qp_state=%x "
- "new qp_state=%x attribute_mask=%x",
- my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state,
attr_mask);
+ ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+ "new qp_state=%x attribute_mask=%x",
+ my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state,
attr_mask);
qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state :
qp_cur_state;
if (!smi_reset2init &&
!ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
attr_mask)) {
ret = -EINVAL;
- EDEB_ERR(4, "Invalid qp transition new_state=%x
cur_state=%x "
- "ehca_qp=%p qp_num=%x attr_mask=%x",
- qp_new_state, qp_cur_state, my_qp, ibqp->qp_num,
- attr_mask);
+ ehca_err(ibqp->device,
+ "Invalid qp transition new_state=%x cur_state=%x
"
+ "ehca_qp=%p qp_num=%x attr_mask=%x",
qp_new_state,
+ qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
goto modify_qp_exit1;
}
@@ -898,7 +898,7 @@ static int internal_modify_qp(struct ib_
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
else {
ret = -EINVAL;
- EDEB_ERR(4, "Invalid new qp state=%x "
+ ehca_err(ibqp->device, "Invalid new qp state=%x "
"ehca_qp=%p qp_num=%x",
qp_new_state, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
@@ -908,10 +908,9 @@ static int internal_modify_qp(struct ib_
statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
if (statetrans < 0) {
ret = -EINVAL;
- EDEB_ERR(4, "<INVALID STATE CHANGE> qp_cur_state=%x "
- "new_qp_state=%x State_xsition=%x "
- "ehca_qp=%p qp_num=%x",
- qp_cur_state, qp_new_state,
+ ehca_err(ibqp->device, "<INVALID STATE CHANGE>
qp_cur_state=%x "
+ "new_qp_state=%x State_xsition=%x ehca_qp=%p "
+ "qp_num=%x", qp_cur_state, qp_new_state,
statetrans, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
@@ -920,13 +919,15 @@ static int internal_modify_qp(struct ib_
if (qp_attr_idx < 0) {
ret = qp_attr_idx;
- EDEB_ERR(4, "Invalid QP type=%x ehca_qp=%p qp_num=%x",
+ ehca_err(ibqp->device,
+ "Invalid QP type=%x ehca_qp=%p qp_num=%x",
ibqp->qp_type, my_qp, ibqp->qp_num);
goto modify_qp_exit1;
}
- EDEB(7, "ehca_qp=%p qp_num=%x <VALID STATE CHANGE>
qp_state_xsit=%x",
- my_qp, ibqp->qp_num, statetrans);
+ ehca_dbg(ibqp->device,
+ "ehca_qp=%p qp_num=%x <VALID STATE CHANGE>
qp_state_xsit=%x",
+ my_qp, ibqp->qp_num, statetrans);
/* sqe -> rts: set purge bit of bad wqe before actual trans */
if ((my_qp->qp_type == IB_QPT_UD ||
@@ -935,7 +936,7 @@ static int internal_modify_qp(struct ib_
statetrans == IB_QPST_SQE2RTS) {
/* mark next free wqe if kernel */
if (my_qp->uspace_squeue == 0) {
- struct ehca_wqe *wqe = NULL;
+ struct ehca_wqe *wqe;
/* lock send queue */
spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
squeue_locked = 1;
@@ -943,12 +944,12 @@ static int internal_modify_qp(struct ib_
wqe = (struct ehca_wqe*)
ipz_qeit_get(&my_qp->ipz_squeue);
wqe->optype = wqe->wqef = 0xff;
- EDEB(7, "qp_num=%x next_free_wqe=%p",
- ibqp->qp_num, wqe);
+ ehca_dbg(ibqp->device, "qp_num=%x
next_free_wqe=%p",
+ ibqp->qp_num, wqe);
}
ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
if (ret) {
- EDEB_ERR(4, "prepare_sqe_rts() failed "
+ ehca_err(ibqp->device, "prepare_sqe_rts() failed "
"ehca_qp=%p qp_num=%x ret=%x",
my_qp, ibqp->qp_num, ret);
goto modify_qp_exit2;
@@ -977,14 +978,11 @@ static int internal_modify_qp(struct ib_
if (attr_mask & IB_QP_PKEY_INDEX) {
mqpcb->prim_p_key_idx = attr->pkey_index;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_PKEY_INDEX update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_PORT) {
if (attr->port_num < 1 || attr->port_num >
shca->num_ports) {
ret = -EINVAL;
- EDEB_ERR(4, "Invalid port=%x. "
+ ehca_err(ibqp->device, "Invalid port=%x. "
"ehca_qp=%p qp_num=%x num_ports=%x",
attr->port_num, my_qp, ibqp->qp_num,
shca->num_ports);
@@ -992,14 +990,10 @@ static int internal_modify_qp(struct ib_
}
mqpcb->prim_phys_port = attr->port_num;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_PORT update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_QKEY) {
mqpcb->qkey = attr->qkey;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_QKEY update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_AV) {
int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
@@ -1013,18 +1007,12 @@ static int internal_modify_qp(struct ib_
mqpcb->service_level = attr->ah_attr.sl;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL,
1);
- if (ah_mult < ehca_mult)
+ if (ah_mult < ehca_mult)
mqpcb->max_static_rate = (ah_mult > 0) ?
((ehca_mult - 1) / ah_mult) : 0;
else
mqpcb->max_static_rate = 0;
- EDEB(7, " ipd=mqpcb->max_static_rate set %x "
- " ah_mult=%x ehca_mult=%x "
- " attr->ah_attr.static_rate=%x",
- mqpcb->max_static_rate,ah_mult,ehca_mult,
- attr->ah_attr.static_rate);
-
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE,
1);
/*
@@ -1052,48 +1040,33 @@ static int internal_modify_qp(struct ib_
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS,
1);
}
-
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_AV update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_PATH_MTU) {
mqpcb->path_mtu = attr->path_mtu;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_PATH_MTU
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_TIMEOUT) {
mqpcb->timeout = attr->timeout;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_TIMEOUT
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_RETRY_CNT) {
mqpcb->retry_count = attr->retry_cnt;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RETRY_CNT
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_RNR_RETRY) {
mqpcb->rnr_retry_count = attr->rnr_retry;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RNR_RETRY
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_RQ_PSN) {
mqpcb->receive_psn = attr->rq_psn;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RQ_PSN
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic
< 3 ?
- attr->max_dest_rd_atomic : 2; /* max is 2 */
+ attr->max_dest_rd_atomic : 2;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_MAX_DEST_RD_ATOMIC "
- "update_mask=%lx", my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3
?
@@ -1101,8 +1074,6 @@ static int internal_modify_qp(struct ib_
update_mask |=
EHCA_BMASK_SET
(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_MAX_QP_RD_ATOMIC "
- "update_mask=%lx", my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_ALT_PATH) {
int ah_mult =
ib_rate_to_mult(attr->alt_ah_attr.static_rate);
@@ -1123,10 +1094,6 @@ static int internal_modify_qp(struct ib_
else
mqpcb->max_static_rate_al = 0;
- EDEB(7, " ipd=mqpcb->max_static_rate set %x,"
- " ah_mult=%x ehca_mult=%x",
- mqpcb->max_static_rate,ah_mult,ehca_mult);
-
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
/*
@@ -1159,43 +1126,28 @@ static int internal_modify_qp(struct ib_
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
}
-
- EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_ALT_PATH
update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_MIN_RNR_TIMER update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_SQ_PSN) {
mqpcb->send_psn = attr->sq_psn;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_SQ_PSN update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_DEST_QPN) {
mqpcb->dest_qp_nr = attr->dest_qp_num;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_DEST_QPN update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_PATH_MIG_STATE) {
mqpcb->path_migration_state = attr->path_mig_state;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_PATH_MIG_STATE update_mask=%lx", my_qp,
- ibqp->qp_num, update_mask);
}
if (attr_mask & IB_QP_CAP) {
@@ -1205,13 +1157,11 @@ static int internal_modify_qp(struct ib_
mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR,
1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "IB_QP_CAP update_mask=%lx",
- my_qp, ibqp->qp_num, update_mask);
/* no support for max_send/recv_sge yet */
}
- EDEB_DMP(7, mqpcb, 4*70, "ehca_qp=%p qp_num=%x", my_qp,
ibqp->qp_num);
+ if (ehca_debug_level)
+ ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
@@ -1221,9 +1171,8 @@ static int internal_modify_qp(struct ib_
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- EDEB_ERR(4, "hipz_h_modify_qp() failed rc=%lx "
- "ehca_qp=%p qp_num=%x",
- h_ret, my_qp, ibqp->qp_num);
+ ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
+ "ehca_qp=%p qp_num=%x",h_ret, my_qp,
ibqp->qp_num);
goto modify_qp_exit2;
}
@@ -1234,7 +1183,7 @@ static int internal_modify_qp(struct ib_
/* doorbell to reprocessing wqes */
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, bad_wqe_cnt-1);
- EDEB(6, "doorbell for %x wqes", bad_wqe_cnt);
+ ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
}
if (statetrans == IB_QPST_RESET2INIT ||
@@ -1244,10 +1193,6 @@ static int internal_modify_qp(struct ib_
update_mask = 0;
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
- EDEB(7, "ehca_qp=%p qp_num=%x "
- "RESET_2_INIT needs an additional enable "
- "-> update_mask=%lx", my_qp, ibqp->qp_num,
update_mask);
-
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
&my_qp->pf,
@@ -1257,10 +1202,9 @@ static int internal_modify_qp(struct ib_
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- EDEB_ERR(4, "ENABLE in context of "
- "RESET_2_INIT failed! "
- "Maybe you didn't get a LID"
- "h_ret=%lx ehca_qp=%p qp_num=%x",
+ ehca_err(ibqp->device, "ENABLE in context of "
+ "RESET_2_INIT failed! Maybe you didn't get "
+ "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
@@ -1283,91 +1227,60 @@ modify_qp_exit2:
modify_qp_exit1:
kfree(mqpcb);
-modify_qp_exit0:
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ibqp_type=%x ret=%x",
- my_qp, ibqp->qp_num, ibqp->qp_type, ret);
return ret;
}
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int
attr_mask)
{
- int ret = 0;
- struct ehca_qp *my_qp = NULL;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct
ehca_pd,
+ ib_pd);
u32 cur_pid = current->tgid;
- EHCA_CHECK_ADR(ibqp);
- EHCA_CHECK_ADR(attr);
- EHCA_CHECK_ADR(ibqp->device);
-
- my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
- EDEB_EN(7, "ehca_qp=%p qp_num=%x ibqp_type=%x attr_mask=%x",
- my_qp, ibqp->qp_num, ibqp->qp_type, attr_mask);
-
- my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(ibqp->pd->device, "Invalid caller pid=%x
ownpid=%x",
cur_pid, my_pd->ownpid);
- ret = -EINVAL;
- } else
- ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+ return -EINVAL;
+ }
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ibqp_type=%x ret=%x",
- my_qp, ibqp->qp_num, ibqp->qp_type, ret);
- return ret;
+ return internal_modify_qp(ibqp, attr, attr_mask, 0);
}
int ehca_query_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
- struct ehca_qp *my_qp = NULL;
- struct ehca_shca *shca = NULL;
- struct hcp_modify_qp_control_block *qpcb = NULL;
- struct ipz_adapter_handle adapter_handle;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct
ehca_pd,
+ ib_pd);
+ struct ehca_shca *shca = container_of(qp->device, struct
ehca_shca,
+ ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+ struct hcp_modify_qp_control_block *qpcb;
u32 cur_pid = current->tgid;
- int cnt = 0, ret = 0;
- u64 h_ret = H_SUCCESS;
+ int cnt, ret = 0;
+ u64 h_ret;
- EHCA_CHECK_ADR(qp);
- EHCA_CHECK_ADR(qp_attr);
- EHCA_CHECK_DEVICE(qp->device);
-
- my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
- EDEB_EN(7, "ehca_qp=%p qp_num=%x "
- "qp_attr=%p qp_attr_mask=%x qp_init_attr=%p",
- my_qp, qp->qp_num, qp_attr, qp_attr_mask, qp_init_attr);
-
- my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
- ret = -EINVAL;
- goto query_qp_exit0;
+ return -EINVAL;
}
- shca = container_of(qp->device, struct ehca_shca, ib_device);
- adapter_handle = shca->ipz_hca_handle;
-
if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
- ret = -EINVAL;
- EDEB_ERR(4,"Invalid attribute mask "
+ ehca_err(qp->device,"Invalid attribute mask "
"ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
my_qp, qp->qp_num, qp_attr_mask);
- goto query_qp_exit0;
+ return -EINVAL;
}
qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
if (!qpcb) {
- ret = -ENOMEM;
- EDEB_ERR(4,"Out of memory for qpcb "
+ ehca_err(qp->device,"Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
- goto query_qp_exit0;
+ return -ENOMEM;
}
h_ret = hipz_h_query_qp(adapter_handle,
@@ -1377,7 +1290,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- EDEB_ERR(4,"hipz_h_query_qp() failed "
+ ehca_err(qp->device,"hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lx",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
@@ -1385,9 +1298,10 @@ int ehca_query_qp(struct ib_qp *qp,
qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
qp_attr->qp_state = qp_attr->cur_qp_state;
+
if (qp_attr->cur_qp_state == -EINVAL) {
ret = -EINVAL;
- EDEB_ERR(4,"Got invalid ehca_qp_state=%x "
+ ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
qpcb->qp_state, my_qp, qp->qp_num);
goto query_qp_exit1;
@@ -1482,54 +1396,33 @@ int ehca_query_qp(struct ib_qp *qp,
if (qp_init_attr)
*qp_init_attr = my_qp->init_attr;
- EDEB(7, "ehca_qp=%p qp_number=%x dest_qp_number=%x "
- "dlid=%x path_mtu=%x dest_gid=%lx_%lx "
- "service_level=%x qp_state=%x",
- my_qp, qpcb->qp_number, qpcb->dest_qp_nr,
- qpcb->dlid, qpcb->path_mtu,
- qpcb->dest_gid.dw[0], qpcb->dest_gid.dw[1],
- qpcb->service_level, qpcb->qp_state);
-
- EDEB_DMP(7, qpcb, 4*70, "ehca_qp=%p qp_num=%x", my_qp,
qp->qp_num);
+ if (ehca_debug_level)
+ ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
query_qp_exit1:
kfree(qpcb);
-query_qp_exit0:
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x",
- my_qp, qp->qp_num, ret);
return ret;
}
int ehca_destroy_qp(struct ib_qp *ibqp)
{
- extern struct ehca_module ehca_module;
- struct ehca_qp *my_qp = NULL;
- struct ehca_shca *shca = NULL;
- struct ehca_pfqp *qp_pf = NULL;
- struct ehca_pd *my_pd = NULL;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->device, struct
ehca_shca,
+ ib_device);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct
ehca_pd,
+ ib_pd);
u32 cur_pid = current->tgid;
- u32 qp_num = 0;
- int ret = 0;
- u64 h_ret = H_SUCCESS;
- u8 port_num = 0;
+ u32 qp_num = ibqp->qp_num;
+ int ret;
+ u64 h_ret;
+ u8 port_num;
enum ib_qp_type qp_type;
unsigned long flags;
- EHCA_CHECK_ADR(ibqp);
-
- my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- qp_num = ibqp->qp_num;
- qp_pf = &my_qp->pf;
-
- shca = container_of(ibqp->device, struct ehca_shca, ib_device);
-
- EDEB_EN(7, "ehca_qp=%p qp_num=%x", my_qp, ibqp->qp_num);
-
- my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
@@ -1538,11 +1431,10 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
ret = ehca_cq_unassign_qp(my_qp->send_cq,
my_qp->real_qp_num);
if (ret) {
- EDEB_ERR(4, "Couldn't unassign qp from send_cq "
- "ret=%x qp_num=%x cq_num=%x",
- ret, my_qp->ib_qp.qp_num,
- my_qp->send_cq->cq_number);
- goto destroy_qp_exit0;
+ ehca_err(ibqp->device, "Couldn't unassign qp from
"
+ "send_cq ret=%x qp_num=%x cq_num=%x",
ret,
+ my_qp->ib_qp.qp_num,
my_qp->send_cq->cq_number);
+ return ret;
}
}
@@ -1554,17 +1446,25 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
if (my_qp->uspace_rqueue) {
ret = ehca_munmap(my_qp->uspace_rqueue,
my_qp->ipz_rqueue.queue_length);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap rqueue "
+ "qp_num=%x", qp_num);
ret = ehca_munmap(my_qp->uspace_squeue,
my_qp->ipz_squeue.queue_length);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap squeue "
+ "qp_num=%x", qp_num);
ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap fwh
qp_num=%x",
+ qp_num);
}
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
- EDEB_ERR(4, "hipz_h_destroy_qp() failed "
- "rc=%lx ehca_qp=%p qp_num=%x",
- h_ret, qp_pf, qp_num);
- goto destroy_qp_exit0;
+ ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+ "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
+ return ehca2ib_return_code(h_ret);
}
port_num = my_qp->init_attr.port_num;
@@ -1573,9 +1473,8 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
/* no support for IB_QPT_SMI yet */
if (qp_type == IB_QPT_GSI) {
struct ib_event event;
-
- EDEB(4, "device %s: port %x is inactive.",
- shca->ib_device.name, port_num);
+ ehca_info(ibqp->device, "device %s: port %x is inactive.",
+ shca->ib_device.name, port_num);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port_num;
@@ -1585,10 +1484,23 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
ipz_queue_dtor(&my_qp->ipz_rqueue);
ipz_queue_dtor(&my_qp->ipz_squeue);
- kmem_cache_free(ehca_module.cache_qp, my_qp);
+ kmem_cache_free(qp_cache, my_qp);
+ return 0;
+}
-destroy_qp_exit0:
- ret = ehca2ib_return_code(h_ret);
- EDEB_EX(7,"ret=%x", ret);
- return ret;
+int ehca_init_qp_cache(void)
+{
+ qp_cache = kmem_cache_create("ehca_cache_qp",
+ sizeof(struct ehca_qp), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!qp_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_qp_cache(void)
+{
+ if (qp_cache)
+ kmem_cache_destroy(qp_cache);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_reqs.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_reqs.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_reqs.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_reqs.c 2006-08-30
20:00:16.000000000 +0200
@@ -41,8 +41,6 @@
*/
-#define DEB_PREFIX "reqs"
-
#include <asm-powerpc/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
@@ -58,7 +56,7 @@ static inline int ehca_write_rwqe(struct
u8 cnt_ds;
if (unlikely((recv_wr->num_sge < 0) ||
(recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
- EDEB_ERR(4, "Invalid number of WQE SGE. "
+ ehca_gen_err("Invalid number of WQE SGE. "
"num_sqe=%x max_nr_of_sg=%x",
recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
return -EINVAL; /* invalid SG list length */
@@ -79,9 +77,9 @@ static inline int ehca_write_rwqe(struct
recv_wr->sg_list[cnt_ds].length;
}
- if (IS_EDEB_ON(7)) {
- EDEB(7, "RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
- EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv
wqe");
+ if (ehca_debug_level) {
+ ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
+ ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv
wqe");
}
return 0;
@@ -94,31 +92,35 @@ static inline int ehca_write_rwqe(struct
static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
- int idx = 0;
- int j = 0;
+ int idx;
+ int j;
while (send_wr) {
struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
struct ib_sge *sge = send_wr->sg_list;
- EDEB(4, "send_wr#%x wr_id=%lx num_sge=%x "
- "send_flags=%x opcode=%x",idx, send_wr->wr_id,
- send_wr->num_sge, send_wr->send_flags,
send_wr->opcode);
+ ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
+ "send_flags=%x opcode=%x",idx,
send_wr->wr_id,
+ send_wr->num_sge, send_wr->send_flags,
+ send_wr->opcode);
if (mad_hdr) {
- EDEB(4, "send_wr#%x mad_hdr base_version=%x "
- "mgmt_class=%x class_version=%x method=%x "
- "status=%x class_specific=%x tid=%lx
attr_id=%x "
- "resv=%x attr_mod=%x",
- idx, mad_hdr->base_version,
mad_hdr->mgmt_class,
- mad_hdr->class_version, mad_hdr->method,
- mad_hdr->status, mad_hdr->class_specific,
- mad_hdr->tid, mad_hdr->attr_id,
mad_hdr->resv,
- mad_hdr->attr_mod);
+ ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
+ "mgmt_class=%x class_version=%x
method=%x "
+ "status=%x class_specific=%x tid=%lx
"
+ "attr_id=%x resv=%x attr_mod=%x",
+ idx, mad_hdr->base_version,
+ mad_hdr->mgmt_class,
+ mad_hdr->class_version,
mad_hdr->method,
+ mad_hdr->status,
mad_hdr->class_specific,
+ mad_hdr->tid, mad_hdr->attr_id,
+ mad_hdr->resv,
+ mad_hdr->attr_mod);
}
for (j = 0; j < send_wr->num_sge; j++) {
u8 *data = (u8 *) abs_to_virt(sge->addr);
- EDEB(4, "send_wr#%x sge#%x addr=%p length=%x
lkey=%x",
- idx, j, data, sge->length, sge->lkey);
+ ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x
"
+ "lkey=%x",
+ idx, j, data, sge->length,
sge->lkey);
/* assume length is n*16 */
- EDEB_DMP(4, data, sge->length, "send_wr#%x
sge#%x",
+ ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
idx, j);
sge++;
} /* eof for j */
@@ -140,7 +142,7 @@ static inline int ehca_write_swqe(struct
if (unlikely((send_wr->num_sge < 0) ||
(send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
- EDEB_ERR(4, "Invalid number of WQE SGE. "
+ ehca_gen_err("Invalid number of WQE SGE. "
"num_sqe=%x max_nr_of_sg=%x",
send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
return -EINVAL; /* invalid SG list length */
@@ -164,7 +166,7 @@ static inline int ehca_write_swqe(struct
wqe_p->optype = WQE_OPTYPE_RDMAREAD;
break;
default:
- EDEB_ERR(4, "Invalid opcode=%x", send_wr->opcode);
+ ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
return -EINVAL; /* invalid opcode */
}
@@ -196,7 +198,7 @@ static inline int ehca_write_swqe(struct
wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn
<< 8;
wqe_p->local_ee_context_qkey = remote_qkey;
if (!send_wr->wr.ud.ah) {
- EDEB_ERR(4, "wr.ud.ah is NULL. qp=%p", qp);
+ ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL;
}
my_av = container_of(send_wr->wr.ud.ah, struct ehca_av,
ib_ah);
@@ -254,13 +256,13 @@ static inline int ehca_write_swqe(struct
break;
default:
- EDEB_ERR(4, "Invalid qptype=%x", qp->qp_type);
+ ehca_gen_err("Invalid qptype=%x", qp->qp_type);
return -EINVAL;
}
- if (IS_EDEB_ON(7)) {
- EDEB(7, "SEND WQE written into queue qp=%p ", qp);
- EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
+ if (ehca_debug_level) {
+ ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
+ ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
}
return 0;
}
@@ -355,19 +357,12 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr)
{
- struct ehca_qp *my_qp = NULL;
- struct ib_send_wr *cur_send_wr = NULL;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ib_send_wr *cur_send_wr;
+ struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags = 0;
-
- EHCA_CHECK_ADR(qp);
- my_qp = container_of(qp, struct ehca_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_ADR(send_wr);
- EDEB_EN(7, "ehca_qp=%p qp_num=%x send_wr=%p bad_send_wr=%p",
- my_qp, qp->qp_num, send_wr, bad_send_wr);
+ unsigned long spl_flags;
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -384,8 +379,8 @@ int ehca_post_send(struct ib_qp *qp,
*bad_send_wr = cur_send_wr;
if (wqe_cnt == 0) {
ret = -ENOMEM;
- EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
- qp->qp_num);
+ ehca_err(qp->device, "Too many posted WQEs "
+ "qp_num=%x", qp->qp_num);
}
goto post_send_exit0;
}
@@ -400,14 +395,14 @@ int ehca_post_send(struct ib_qp *qp,
*bad_send_wr = cur_send_wr;
if (wqe_cnt == 0) {
ret = -EINVAL;
- EDEB_ERR(4, "Could not write WQE
qp_num=%x",
- qp->qp_num);
+ ehca_err(qp->device, "Could not write WQE
"
+ "qp_num=%x", qp->qp_num);
}
goto post_send_exit0;
}
wqe_cnt++;
- EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
+ ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_send_wr */
post_send_exit0:
@@ -415,8 +410,6 @@ post_send_exit0:
spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
- my_qp, qp->qp_num, ret, wqe_cnt);
return ret;
}
@@ -424,19 +417,12 @@ int ehca_post_recv(struct ib_qp *qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
- struct ehca_qp *my_qp = NULL;
- struct ib_recv_wr *cur_recv_wr = NULL;
- struct ehca_wqe *wqe_p = NULL;
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ib_recv_wr *cur_recv_wr;
+ struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags = 0;
-
- EHCA_CHECK_ADR(qp);
- my_qp = container_of(qp, struct ehca_qp, ib_qp);
- EHCA_CHECK_QP(my_qp);
- EHCA_CHECK_ADR(recv_wr);
- EDEB_EN(7, "ehca_qp=%p qp_num=%x recv_wr=%p bad_recv_wr=%p",
- my_qp, qp->qp_num, recv_wr, bad_recv_wr);
+ unsigned long spl_flags;
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
@@ -453,14 +439,13 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -ENOMEM;
- EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
- qp->qp_num);
+ ehca_err(qp->device, "Too many posted WQEs "
+ "qp_num=%x", qp->qp_num);
}
goto post_recv_exit0;
}
/* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p,
- cur_recv_wr);
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
/*
* if something failed,
* reset the free entry pointer to the start value
@@ -470,13 +455,13 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -EINVAL;
- EDEB_ERR(4, "Could not write WQE
qp_num=%x",
- qp->qp_num);
+ ehca_err(qp->device, "Could not write WQE
"
+ "qp_num=%x", qp->qp_num);
}
goto post_recv_exit0;
}
wqe_cnt++;
- EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
@@ -484,8 +469,6 @@ post_recv_exit0:
spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
- EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
- my_qp, qp->qp_num, ret, wqe_cnt);
return ret;
}
@@ -510,18 +493,16 @@ static inline int ehca_poll_cq_one(struc
{
int ret = 0;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- struct ehca_cqe *cqe = NULL;
+ struct ehca_cqe *cqe;
int cqe_count = 0;
- EDEB_EN(7, "ehca_cq=%p cq_num=%x wc=%p", my_cq, my_cq->cq_number,
wc);
-
poll_cq_one_read_cqe:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
ret = -EAGAIN;
- EDEB(7, "Completion queue is empty ehca_cq=%p cq_num=%x "
- "ret=%x", my_cq, my_cq->cq_number, ret);
+ ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p
"
+ "cq_num=%x ret=%x", my_cq, my_cq->cq_number,
ret);
goto poll_cq_one_exit0;
}
@@ -531,13 +512,13 @@ poll_cq_one_read_cqe:
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
struct ehca_qp *qp=ehca_cq_get_qp(my_cq,
cqe->local_qp_number);
- int purgeflag = 0;
- unsigned long spl_flags = 0;
+ int purgeflag;
+ unsigned long spl_flags;
if (!qp) {
- EDEB_ERR(4, "cq_num=%x qp_num=%x "
+ ehca_err(cq->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
my_cq->cq_number, cqe->local_qp_number);
- EDEB_DMP(4, cqe, 64, "cq_num=%x qp_num=%x",
+ ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
my_cq->cq_number, cqe->local_qp_number);
/* ignore this purged cqe */
goto poll_cq_one_read_cqe;
@@ -547,10 +528,13 @@ poll_cq_one_read_cqe:
spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
if (purgeflag) {
- EDEB(6, "Got CQE with purged bit qp_num=%x
src_qp=%x",
- cqe->local_qp_number, cqe->remote_qp_number);
- EDEB_DMP(6, cqe, 64, "qp_num=%x src_qp=%x",
+ ehca_dbg(cq->device, "Got CQE with purged bit
qp_num=%x "
+ "src_qp=%x",
cqe->local_qp_number,
cqe->remote_qp_number);
+ if (ehca_debug_level)
+ ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
+ cqe->local_qp_number,
+ cqe->remote_qp_number);
/*
* ignore this to avoid double cqes of bad wqe
* that caused sqe and turn off purge flag
@@ -561,13 +545,15 @@ poll_cq_one_read_cqe:
}
/* tracing cqe */
- if (IS_EDEB_ON(7)) {
- EDEB(7, "Received COMPLETION ehca_cq=%p cq_num=%x -----",
- my_cq, my_cq->cq_number);
- EDEB_DMP(7, cqe, 64, "ehca_cq=%p cq_num=%x",
+ if (ehca_debug_level) {
+ ehca_dbg(cq->device,
+ "Received COMPLETION ehca_cq=%p cq_num=%x -----",
+ my_cq, my_cq->cq_number);
+ ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+ my_cq, my_cq->cq_number);
+ ehca_dbg(cq->device,
+ "ehca_cq=%p cq_num=%x -------------------------",
my_cq, my_cq->cq_number);
- EDEB(7, "ehca_cq=%p cq_num=%x -------------------------",
- my_cq, my_cq->cq_number);
}
/* we got a completion! */
@@ -576,11 +562,11 @@ poll_cq_one_read_cqe:
/* eval ib_wc_opcode */
wc->opcode = ib_wc_opcode[cqe->optype]-1;
if (unlikely(wc->opcode == -1)) {
- EDEB_ERR(4, "Invalid cqe->OPType=%x cqe->status=%x "
+ ehca_err(cq->device, "Invalid cqe->OPType=%x
cqe->status=%x "
"ehca_cq=%p cq_num=%x",
cqe->optype, cqe->status, my_cq,
my_cq->cq_number);
/* dump cqe for other infos */
- EDEB_DMP(4, cqe, 64, "ehca_cq=%p cq_num=%x",
+ ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
/* update also queue adder to throw away this entry!!! */
goto poll_cq_one_exit0;
@@ -604,49 +590,35 @@ poll_cq_one_read_cqe:
wc->sl = cqe->service_level;
if (wc->status != IB_WC_SUCCESS)
- EDEB(6, "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
- "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx
cqe=%p",
- my_cq, my_cq->cq_number, cqe->optype, cqe->status,
- cqe->local_qp_number, cqe->remote_qp_number,
- cqe->work_request_id, cqe);
+ ehca_dbg(cq->device,
+ "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
+ "OPType=%x status=%x qp_num=%x src_qp=%x
wr_id=%lx "
+ "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
+ cqe->status, cqe->local_qp_number,
+ cqe->remote_qp_number, cqe->work_request_id,
cqe);
poll_cq_one_exit0:
if (cqe_count > 0)
hipz_update_feca(my_cq, cqe_count);
- EDEB_EX(7, "ret=%x ehca_cq=%p cq_number=%x wc=%p "
- "status=%x opcode=%x qp_num=%x byte_len=%x",
- ret, my_cq, my_cq->cq_number, wc, wc->status,
- wc->opcode, wc->qp_num, wc->byte_len);
-
return ret;
}
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
- struct ehca_cq *my_cq = NULL;
- int nr = 0;
- struct ib_wc *current_wc = NULL;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ int nr;
+ struct ib_wc *current_wc = wc;
int ret = 0;
- unsigned long spl_flags = 0;
-
- EHCA_CHECK_CQ(cq);
- EHCA_CHECK_ADR(wc);
-
- my_cq = container_of(cq, struct ehca_cq, ib_cq);
- EHCA_CHECK_CQ(my_cq);
-
- EDEB_EN(7, "ehca_cq=%p cq_num=%x num_entries=%d wc=%p",
- my_cq, my_cq->cq_number, num_entries, wc);
+ unsigned long spl_flags;
if (num_entries < 1) {
- EDEB_ERR(4, "Invalid num_entries=%d ehca_cq=%p cq_num=%x",
- num_entries, my_cq, my_cq->cq_number);
+ ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
+ "cq_num=%x", num_entries, my_cq,
my_cq->cq_number);
ret = -EINVAL;
goto poll_cq_exit0;
}
- current_wc = wc;
spin_lock_irqsave(&my_cq->spinlock, spl_flags);
for (nr = 0; nr < num_entries; nr++) {
ret = ehca_poll_cq_one(cq, current_wc);
@@ -659,22 +631,12 @@ int ehca_poll_cq(struct ib_cq *cq, int n
ret = nr;
poll_cq_exit0:
- EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x wc=%p nr_entries=%d",
- my_cq, my_cq->cq_number, ret, wc, nr);
-
return ret;
}
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
{
- struct ehca_cq *my_cq = NULL;
- int ret = 0;
-
- EHCA_CHECK_CQ(cq);
- my_cq = container_of(cq, struct ehca_cq, ib_cq);
- EHCA_CHECK_CQ(my_cq);
- EDEB_EN(7, "ehca_cq=%p cq_num=%x cq_notif=%x",
- my_cq, my_cq->cq_number, cq_notify);
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
switch (cq_notify) {
case IB_CQ_SOLICITED:
@@ -687,8 +649,5 @@ int ehca_req_notify_cq(struct ib_cq *cq,
return -EINVAL;
}
- EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x",
- my_cq, my_cq->cq_number, ret);
-
- return ret;
+ return 0;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_sqp.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_sqp.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_sqp.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_sqp.c 2006-08-30
20:00:16.000000000 +0200
@@ -40,8 +40,6 @@
*/
-#define DEB_PREFIX "e_qp"
-
#include <linux/module.h>
#include <linux/err.h>
#include "ehca_classes.h"
@@ -51,11 +49,6 @@
#include "hcp_if.h"
-extern int ehca_create_aqp1(struct ehca_shca *shca, struct ehca_sport
*sport);
-extern int ehca_destroy_aqp1(struct ehca_sport *sport);
-
-extern int ehca_port_act_time;
-
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special
queue
* pair is created successfully, the corresponding port gets active.
@@ -69,15 +62,10 @@ u64 ehca_define_sqp(struct ehca_shca *sh
struct ehca_qp *ehca_qp,
struct ib_qp_init_attr *qp_init_attr)
{
-
- u32 pma_qp_nr = 0;
- u32 bma_qp_nr = 0;
- u64 ret = H_SUCCESS;
+ u32 pma_qp_nr, bma_qp_nr;
+ u64 ret;
u8 port = qp_init_attr->port_num;
- int counter = 0;
-
- EDEB_EN(7, "port=%x qp_type=%x",
- port, qp_init_attr->qp_type);
+ int counter;
shca->sport[port - 1].port_state = IB_PORT_DOWN;
@@ -93,31 +81,31 @@ u64 ehca_define_sqp(struct ehca_shca *sh
&pma_qp_nr, &bma_qp_nr);
if (ret != H_SUCCESS) {
- EDEB_ERR(4, "Can't define AQP1 for port %x.
rc=%lx",
- port, ret);
- goto ehca_define_aqp1;
+ ehca_err(&shca->ib_device,
+ "Can't define AQP1 for port %x. rc=%lx",
+ port, ret);
+ return ret;
}
break;
default:
- ret = H_PARAMETER;
- goto ehca_define_aqp1;
+ ehca_err(&shca->ib_device, "invalid qp_type=%x",
+ qp_init_attr->qp_type);
+ return H_PARAMETER;
}
- while ((shca->sport[port - 1].port_state != IB_PORT_ACTIVE) &&
- (counter < ehca_port_act_time)) {
- EDEB(6, "... wait until port %x is active",
- port);
+ for (counter = 0;
+ shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
+ counter < ehca_port_act_time;
+ counter++) {
+ ehca_dbg(&shca->ib_device, "... wait until port %x is
active",
+ port);
msleep_interruptible(1000);
- counter++;
}
if (counter == ehca_port_act_time) {
- EDEB_ERR(4, "Port %x is not active.", port);
- ret = H_HARDWARE;
+ ehca_err(&shca->ib_device, "Port %x is not active.",
port);
+ return H_HARDWARE;
}
-ehca_define_aqp1:
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return H_SUCCESS;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_tools.h
linux-2.6/drivers/infiniband/hw/ehca/ehca_tools.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_tools.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_tools.h 2006-08-30
20:00:17.000000000 +0200
@@ -57,195 +57,70 @@
#include <linux/version.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
+#include <linux/device.h>
#include <asm/abs_addr.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
#include <asm/pgtable.h>
-#define EHCA_EDEB_TRACE_MASK_SIZE 32
-extern u8 ehca_edeb_mask[EHCA_EDEB_TRACE_MASK_SIZE];
-#define EDEB_ID_TO_U32(str4) (str4[3] | (str4[2] << 8) | (str4[1] << 16)
| \
- (str4[0] << 24))
+extern int ehca_debug_level;
-static inline u64 ehca_edeb_filter(const u32 level,
- const u32 id, const u32 line)
-{
- u64 ret = 0;
- u32 filenr = 0;
- u32 filter_level = 9;
- u32 dynamic_level = 0;
-
- /*
- * This is code written for the gcc -O2 optimizer
- * which should collapse to two single ints.
- * Filter_level is the first level kicked out by
- * compiler and means trace everything below 6.
- */
-
- if (id == EDEB_ID_TO_U32("ehav")) {
- filenr = 0x01;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("clas")) {
- filenr = 0x02;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("cqeq")) {
- filenr = 0x03;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("shca")) {
- filenr = 0x05;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("eirq")) {
- filenr = 0x06;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("lMad")) {
- filenr = 0x07;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("mcas")) {
- filenr = 0x08;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("mrmw")) {
- filenr = 0x09;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("vpd ")) {
- filenr = 0x0a;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("e_qp")) {
- filenr = 0x0b;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("uqes")) {
- filenr = 0x0c;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("PHYP")) {
- filenr = 0x0d;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("hcpi")) {
- filenr = 0x0e;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("iptz")) {
- filenr = 0x0f;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("spta")) {
- filenr = 0x10;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("simp")) {
- filenr = 0x11;
- filter_level = 8;
- }
- if (id == EDEB_ID_TO_U32("reqs")) {
- filenr = 0x12;
- filter_level = 8;
- }
-
- if ((filenr - 1) > sizeof(ehca_edeb_mask)) {
- filenr = 0;
- }
-
- if (filenr == 0) {
- filter_level = 9;
- } /* default */
- ret = filenr * 0x10000 + line;
- if (filter_level <= level) {
- return ret | 0x100000000L; /* this is the flag to not
trace */
- }
- dynamic_level = ehca_edeb_mask[filenr];
- if (likely(dynamic_level <= level)) {
- ret = ret | 0x100000000L;
- };
- return ret;
-}
-
-#ifdef EHCA_USE_HCALL_KERNEL
-#ifdef CONFIG_PPC_PSERIES
-
-#include <asm/paca.h>
+#define ehca_dbg(ib_dev, format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
+ "PU%04x EHCA_DBG:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, \
+ ## arg); \
+ } while (0)
-/*
- * IS_EDEB_ON - Checks if debug is on for the given level.
- */
-#define IS_EDEB_ON(level) \
-((ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX), __LINE__) & \
- 0x100000000L) == 0)
-
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
- u64 ehca_edeb_filterresult = \
- ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX),
__LINE__);\
- if ((ehca_edeb_filterresult & 0x100000000L) == 0) \
- printk("PU%04x %08x:%s " idstring " "format "\n", \
- get_paca()->paca_index,
(u32)(ehca_edeb_filterresult), \
- __func__, ##args); \
-} while (1 == 0)
-
-#elif REAL_HCALL
-
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
- u64 ehca_edeb_filterresult = \
- ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX),
__LINE__); \
- if ((ehca_edeb_filterresult & 0x100000000L) == 0) \
- printk("%08x:%s " idstring " "format "\n", \
- (u32)(ehca_edeb_filterresult), \
- __func__, ##args); \
-} while (1 == 0)
-
-#endif
-#else
-
-#define IS_EDEB_ON(level) (1)
-
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
- printk("%s " idstring " "format "\n", \
- __func__, ##args); \
-} while (1 == 0)
+#define ehca_info(ib_dev, format, arg...) \
+ dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n",
\
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_warn(ib_dev, format, arg...) \
+ dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n",
\
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_err(ib_dev, format, arg...) \
+ dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/* use this one only if no ib_dev available */
+#define ehca_gen_dbg(format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format
"\n",\
+ get_paca()->paca_index, __FUNCTION__, ##
arg); \
+ } while (0)
-#endif
+#define ehca_gen_warn(format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ printk(KERN_INFO "PU%04x EHCA_WARN:%s " format
"\n",\
+ get_paca()->paca_index, __FUNCTION__, ##
arg); \
+ } while (0)
-/**
- * EDEB - Trace output macro.
- * @level: tracelevel
- * @format: optional format string, use "" if not desired
- * @args: printf like arguments for trace
- */
-#define EDEB(level,format,args...) \
- EDEB_P_GENERIC(level,"",format,##args)
-#define EDEB_ERR(level,format,args...) \
- EDEB_P_GENERIC(level,"HCAD_ERROR ",format,##args)
-#define EDEB_EN(level,format,args...) \
- EDEB_P_GENERIC(level,">>>",format,##args)
-#define EDEB_EX(level,format,args...) \
- EDEB_P_GENERIC(level,"<<<",format,##args)
+#define ehca_gen_err(format, arg...) \
+ printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
/**
- * EDEB_DMP - macro to dump a memory block, whose length is n*8 bytes.
+ * ehca_dmp - printk a memory block, whose length is n*8 bytes.
* Each line has the following layout:
* <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
*/
-#define EDEB_DMP(level,adr,len,format,args...) \
+#define ehca_dmp(adr, len, format, args...) \
do { \
unsigned int x; \
unsigned int l = (unsigned int)(len); \
unsigned char *deb = (unsigned char*)(adr); \
for (x = 0; x < l; x += 16) { \
- EDEB(level, format " adr=%p ofs=%04x %016lx
%016lx", \
- ##args, deb, x, \
- *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
+ printk("EHCA_DMP:%s" format \
+ " adr=%p ofs=%04x %016lx %016lx\n", \
+ __FUNCTION__, ##args, deb, x, \
+ *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
deb += 16; \
} \
} while (0)
@@ -275,129 +150,8 @@ do { \
* EHCA_BMASK_GET - extract a parameter from value by mask
*/
#define EHCA_BMASK_GET(mask,value) \
- ( EHCA_BMASK_MASK(mask)&
(((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
-
-#define PARANOIA_MODE
-#ifdef PARANOIA_MODE
+ (EHCA_BMASK_MASK(mask)&
(((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
-#define EHCA_CHECK_ADR_P(adr) \
- if (unlikely(adr == 0)) { \
- EDEB_ERR(4, "adr=%p check failed line %i", adr, \
- __LINE__); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_ADR(adr) \
- if (unlikely(adr == 0)) { \
- EDEB_ERR(4, "adr=%p check failed line %i", adr, \
- __LINE__); \
- return -EFAULT; }
-
-#define EHCA_CHECK_DEVICE_P(device) \
- if (unlikely(device == 0)) { \
- EDEB_ERR(4, "device=%p check failed", device); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_DEVICE(device) \
- if (unlikely(device == 0)) { \
- EDEB_ERR(4, "device=%p check failed", device); \
- return -EFAULT; }
-
-#define EHCA_CHECK_PD(pd) \
- if (unlikely(pd == 0)) { \
- EDEB_ERR(4, "pd=%p check failed", pd); \
- return -EFAULT; }
-
-#define EHCA_CHECK_PD_P(pd) \
- if (unlikely(pd == 0)) { \
- EDEB_ERR(4, "pd=%p check failed", pd); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_AV(av) \
- if (unlikely(av == 0)) { \
- EDEB_ERR(4, "av=%p check failed", av); \
- return -EFAULT; }
-
-#define EHCA_CHECK_AV_P(av) \
- if (unlikely(av == 0)) { \
- EDEB_ERR(4, "av=%p check failed", av); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_CQ(cq) \
- if (unlikely(cq == 0)) { \
- EDEB_ERR(4, "cq=%p check failed", cq); \
- return -EFAULT; }
-
-#define EHCA_CHECK_CQ_P(cq) \
- if (unlikely(cq == 0)) { \
- EDEB_ERR(4, "cq=%p check failed", cq); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_EQ(eq) \
- if (unlikely(eq == 0)) { \
- EDEB_ERR(4, "eq=%p check failed", eq); \
- return -EFAULT; }
-
-#define EHCA_CHECK_EQ_P(eq) \
- if (unlikely(eq == 0)) { \
- EDEB_ERR(4, "eq=%p check failed", eq); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_QP(qp) \
- if (unlikely(qp == 0)) { \
- EDEB_ERR(4, "qp=%p check failed", qp); \
- return -EFAULT; }
-
-#define EHCA_CHECK_QP_P(qp) \
- if (unlikely(qp == 0)) { \
- EDEB_ERR(4, "qp=%p check failed", qp); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_MR(mr) \
- if (unlikely(mr == 0)) { \
- EDEB_ERR(4, "mr=%p check failed", mr); \
- return -EFAULT; }
-
-#define EHCA_CHECK_MR_P(mr) \
- if (unlikely(mr == 0)) { \
- EDEB_ERR(4, "mr=%p check failed", mr); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_MW(mw) \
- if (unlikely(mw == 0)) { \
- EDEB_ERR(4, "mw=%p check failed", mw); \
- return -EFAULT; }
-
-#define EHCA_CHECK_MW_P(mw) \
- if (unlikely(mw == 0)) { \
- EDEB_ERR(4, "mw=%p check failed", mw); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_FMR(fmr) \
- if (unlikely(fmr == 0)) { \
- EDEB_ERR(4, "fmr=%p check failed", fmr); \
- return -EFAULT; }
-
-#define EHCA_CHECK_FMR_P(fmr) \
- if (unlikely(fmr == 0)) { \
- EDEB_ERR(4, "fmr=%p check failed", fmr); \
- return ERR_PTR(-EFAULT); }
-
-#define EHCA_REGISTER_PD(device,pd)
-#define EHCA_REGISTER_AV(pd,av)
-#define EHCA_DEREGISTER_PD(PD)
-#define EHCA_DEREGISTER_AV(av)
-#else
-#define EHCA_CHECK_DEVICE_P(device)
-
-#define EHCA_CHECK_PD(pd)
-#define EHCA_REGISTER_PD(device,pd)
-#define EHCA_DEREGISTER_PD(PD)
-#endif
-
-static inline int ehca_adr_bad(void *adr)
-{
- return !adr;
-}
/* Converts ehca to ib return code */
static inline int ehca2ib_return_code(u64 ehca_rc)
@@ -414,4 +168,5 @@ static inline int ehca2ib_return_code(u6
}
}
+
#endif /* EHCA_TOOLS_H */
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_uverbs.c
linux-2.6/drivers/infiniband/hw/ehca/ehca_uverbs.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ehca_uverbs.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ehca_uverbs.c 2006-08-30
20:00:16.000000000 +0200
@@ -40,9 +40,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#undef DEB_PREFIX
-#define DEB_PREFIX "uver"
-
#include <asm/current.h>
#include "ehca_classes.h"
@@ -54,30 +51,20 @@
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
struct ib_udata *udata)
{
- struct ehca_ucontext *my_context = NULL;
-
- EHCA_CHECK_ADR_P(device);
- EDEB_EN(7, "device=%p name=%s", device, device->name);
+ struct ehca_ucontext *my_context;
my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
if (!my_context) {
- EDEB_ERR(4, "Out of memory device=%p", device);
+ ehca_err(device, "Out of memory device=%p", device);
return ERR_PTR(-ENOMEM);
}
- EDEB_EX(7, "device=%p ucontext=%p", device, my_context);
-
return &my_context->ib_ucontext;
}
int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
- struct ehca_ucontext *my_context = NULL;
- EHCA_CHECK_ADR(context);
- EDEB_EN(7, "ucontext=%p", context);
- my_context = container_of(context, struct ehca_ucontext,
ib_ucontext);
- kfree(my_context);
- EDEB_EN(7, "ucontext=%p", context);
+ kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
return 0;
}
@@ -91,83 +78,88 @@ struct page *ehca_nopage(struct vm_area_
u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
u32 cur_pid = current->tgid;
unsigned long flags;
+ struct ehca_cq *cq;
+ struct ehca_qp *qp;
+ struct ehca_pd *pd;
+ u64 offset;
+ void *vaddr;
- EDEB_EN(7, "vm_start=%lx vm_end=%lx vm_page_prot=%lx
vm_fileoff=%lx "
- "address=%lx",
- vma->vm_start, vma->vm_end, vma->vm_page_prot, fileoffset,
- address);
-
- if (q_type == 1) { /* CQ */
- struct ehca_cq *cq = NULL;
- u64 offset;
- void *vaddr = NULL;
-
+ switch (q_type) {
+ case 1: /* CQ */
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
cq = idr_find(&ehca_cq_idr, idr_handle);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- if (cq->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
- cur_pid, cq->ownpid);
+ /* make sure this mmap really belongs to the authorized
user */
+ if (!cq) {
+ ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
return NOPAGE_SIGBUS;
}
- /* make sure this mmap really belongs to the authorized
user */
- if (!cq) {
- EDEB_ERR(4, "cq is NULL ret=NOPAGE_SIGBUS");
+ if (cq->ownpid != cur_pid) {
+ ehca_err(cq->ib_cq.device,
+ "Invalid caller pid=%x ownpid=%x",
+ cur_pid, cq->ownpid);
return NOPAGE_SIGBUS;
}
+
if (rsrc_type == 2) {
- EDEB(6, "cq=%p cq queuearea", cq);
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea",
cq);
offset = address - vma->vm_start;
vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
- EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+ ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
mypage = virt_to_page(vaddr);
}
- } else if (q_type == 2) { /* QP */
- struct ehca_qp *qp = NULL;
- struct ehca_pd *pd = NULL;
- u64 offset;
- void *vaddr = NULL;
+ break;
+ case 2: /* QP */
spin_lock_irqsave(&ehca_qp_idr_lock, flags);
qp = idr_find(&ehca_qp_idr, idr_handle);
spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ /* make sure this mmap really belongs to the authorized
user */
+ if (!qp) {
+ ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
+ return NOPAGE_SIGBUS;
+ }
pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(qp->ib_qp.device,
+ "Invalid caller pid=%x ownpid=%x",
cur_pid, pd->ownpid);
return NOPAGE_SIGBUS;
}
- /* make sure this mmap really belongs to the authorized
user */
- if (!qp) {
- EDEB_ERR(4, "qp is NULL ret=NOPAGE_SIGBUS");
- return NOPAGE_SIGBUS;
- }
if (rsrc_type == 2) { /* rqueue */
- EDEB(6, "qp=%p qp rqueuearea", qp);
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea",
qp);
offset = address - vma->vm_start;
vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
- EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+ ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
mypage = virt_to_page(vaddr);
} else if (rsrc_type == 3) { /* squeue */
- EDEB(6, "qp=%p qp squeuearea", qp);
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea",
qp);
offset = address - vma->vm_start;
vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
- EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+ ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
mypage = virt_to_page(vaddr);
}
+ break;
+
+ default:
+ ehca_gen_err("bad queue type %x", q_type);
+ return NOPAGE_SIGBUS;
}
if (!mypage) {
- EDEB_ERR(4, "Invalid page adr==NULL ret=NOPAGE_SIGBUS");
+ ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
return NOPAGE_SIGBUS;
}
get_page(mypage);
- EDEB_EX(7, "page adr=%p", mypage);
+
return mypage;
}
@@ -181,159 +173,161 @@ int ehca_mmap(struct ib_ucontext *contex
u32 idr_handle = fileoffset >> 32;
u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
- u32 ret = -EFAULT; /* assume the worst */
- u64 vsize = 0; /* must be calculated/set below */
- u64 physical = 0; /* must be calculated/set below */
u32 cur_pid = current->tgid;
+ u32 ret;
+ u64 vsize, physical;
unsigned long flags;
+ struct ehca_cq *cq;
+ struct ehca_qp *qp;
+ struct ehca_pd *pd;
- EDEB_EN(7, "vm_start=%lx vm_end=%lx vm_page_prot=%lx
vm_fileoff=%lx",
- vma->vm_start, vma->vm_end, vma->vm_page_prot,
fileoffset);
-
- if (q_type == 1) { /* CQ */
- struct ehca_cq *cq;
-
+ switch (q_type) {
+ case 1: /* CQ */
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
cq = idr_find(&ehca_cq_idr, idr_handle);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ /* make sure this mmap really belongs to the authorized
user */
+ if (!cq)
+ return -EINVAL;
+
if (cq->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(cq->ib_cq.device,
+ "Invalid caller pid=%x ownpid=%x",
cur_pid, cq->ownpid);
return -ENOMEM;
}
- /* make sure this mmap really belongs to the authorized
user */
- if (!cq)
- return -EINVAL;
- if (!cq->ib_cq.uobject)
- return -EINVAL;
- if (cq->ib_cq.uobject->context != context)
+ if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context !=
context)
return -EINVAL;
- if (rsrc_type == 1) { /* galpa fw handle */
- EDEB(6, "cq=%p cq triggerarea", cq);
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea",
cq);
vma->vm_flags |= VM_RESERVED;
vsize = vma->vm_end - vma->vm_start;
if (vsize != EHCA_PAGESIZE) {
- EDEB_ERR(4, "invalid vsize=%lx",
+ ehca_err(cq->ib_cq.device, "invalid
vsize=%lx",
vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto mmap_exit0;
+ return -EINVAL;
}
physical = cq->galpas.user.fw_handle;
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_RESERVED;
- EDEB(6, "vsize=%lx physical=%lx", vsize,
physical);
+ ehca_dbg(cq->ib_cq.device,
+ "vsize=%lx physical=%lx", vsize,
physical);
ret = remap_pfn_range(vma, vma->vm_start,
physical >> PAGE_SHIFT,
vsize,
vma->vm_page_prot);
if (ret) {
- EDEB_ERR(4, "remap_pfn_range() failed
ret=%x",
+ ehca_err(cq->ib_cq.device,
+ "remap_pfn_range() failed
ret=%x",
ret);
- ret = -ENOMEM;
+ return -ENOMEM;
}
- goto mmap_exit0;
- } else if (rsrc_type == 2) { /* cq queue_addr */
- EDEB(6, "cq=%p cq q_addr", cq);
+ break;
+
+ case 2: /* cq queue_addr */
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &ehcau_vm_ops;
- ret = 0;
- goto mmap_exit0;
- } else {
- EDEB_ERR(6, "bad resource type %x", rsrc_type);
- ret = -EINVAL;
- goto mmap_exit0;
+ break;
+
+ default:
+ ehca_err(cq->ib_cq.device, "bad resource type %x",
+ rsrc_type);
+ return -EINVAL;
}
- } else if (q_type == 2) { /* QP */
- struct ehca_qp *qp = NULL;
- struct ehca_pd *pd = NULL;
+ break;
+ case 2: /* QP */
spin_lock_irqsave(&ehca_qp_idr_lock, flags);
qp = idr_find(&ehca_qp_idr, idr_handle);
spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ /* make sure this mmap really belongs to the authorized
user */
+ if (!qp)
+ return -EINVAL;
+
pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (pd->ownpid != cur_pid) {
- EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(qp->ib_qp.device,
+ "Invalid caller pid=%x ownpid=%x",
cur_pid, pd->ownpid);
return -ENOMEM;
}
- /* make sure this mmap really belongs to the authorized
user */
- if (!qp || !qp->ib_qp.uobject ||
- qp->ib_qp.uobject->context != context) {
- EDEB(6, "qp=%p, uobject=%p, context=%p",
- qp, qp->ib_qp.uobject,
qp->ib_qp.uobject->context);
- ret = -EINVAL;
- goto mmap_exit0;
- }
- if (rsrc_type == 1) { /* galpa fw handle */
- EDEB(6, "qp=%p qp triggerarea", qp);
+ if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context !=
context)
+ return -EINVAL;
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea",
qp);
vma->vm_flags |= VM_RESERVED;
vsize = vma->vm_end - vma->vm_start;
if (vsize != EHCA_PAGESIZE) {
- EDEB_ERR(4, "invalid vsize=%lx",
+ ehca_err(qp->ib_qp.device, "invalid
vsize=%lx",
vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto mmap_exit0;
+ return -EINVAL;
}
physical = qp->galpas.user.fw_handle;
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_RESERVED;
- EDEB(6, "vsize=%lx physical=%lx", vsize,
physical);
+ ehca_dbg(qp->ib_qp.device, "vsize=%lx
physical=%lx",
+ vsize, physical);
ret = remap_pfn_range(vma, vma->vm_start,
physical >> PAGE_SHIFT,
vsize,
vma->vm_page_prot);
if (ret) {
- EDEB_ERR(4, "remap_pfn_range() failed
ret=%x",
+ ehca_err(qp->ib_qp.device,
+ "remap_pfn_range() failed
ret=%x",
ret);
- ret = -ENOMEM;
+ return -ENOMEM;
}
- goto mmap_exit0;
- } else if (rsrc_type == 2) { /* qp rqueue_addr */
- EDEB(6, "qp=%p qp rqueue_addr", qp);
+ break;
+
+ case 2: /* qp rqueue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr",
qp);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &ehcau_vm_ops;
- ret = 0;
- goto mmap_exit0;
- } else if (rsrc_type == 3) { /* qp squeue_addr */
- EDEB(6, "qp=%p qp squeue_addr", qp);
+ break;
+
+ case 3: /* qp squeue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr",
qp);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &ehcau_vm_ops;
- ret = 0;
- goto mmap_exit0;
- } else {
- EDEB_ERR(4, "bad resource type %x", rsrc_type);
- ret = -EINVAL;
- goto mmap_exit0;
+ break;
+
+ default:
+ ehca_err(qp->ib_qp.device, "bad resource type %x",
+ rsrc_type);
+ return -EINVAL;
}
- } else {
- EDEB_ERR(4, "bad queue type %x", q_type);
- ret = -EINVAL;
- goto mmap_exit0;
+ break;
+
+ default:
+ ehca_gen_err("bad queue type %x", q_type);
+ return -EINVAL;
}
-mmap_exit0:
- EDEB_EX(7, "ret=%x", ret);
- return ret;
+ return 0;
}
-int ehca_mmap_nopage(u64 foffset, u64 length, void ** mapped,
- struct vm_area_struct ** vma)
+int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
+ struct vm_area_struct **vma)
{
- EDEB_EN(7, "foffset=%lx length=%lx", foffset, length);
down_write(¤t->mm->mmap_sem);
*mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
foffset);
up_write(¤t->mm->mmap_sem);
if (!(*mapped)) {
- EDEB_ERR(4, "couldn't mmap foffset=%lx length=%lx",
- foffset, length);
+ ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
+ foffset, length);
return -EINVAL;
}
@@ -342,49 +336,47 @@ int ehca_mmap_nopage(u64 foffset, u64 le
	down_write(&current->mm->mmap_sem);
do_munmap(current->mm, 0, length);
	up_write(&current->mm->mmap_sem);
- EDEB_ERR(4, "couldn't find vma queue=%p", *mapped);
+ ehca_gen_err("couldn't find vma queue=%p", *mapped);
return -EINVAL;
}
(*vma)->vm_flags |= VM_RESERVED;
(*vma)->vm_ops = &ehcau_vm_ops;
- EDEB_EX(7, "mapped=%p", *mapped);
return 0;
}
-int ehca_mmap_register(u64 physical, void ** mapped,
- struct vm_area_struct ** vma)
+int ehca_mmap_register(u64 physical, void **mapped,
+ struct vm_area_struct **vma)
{
- int ret = 0;
+ int ret;
unsigned long vsize;
/* ehca hw supports only 4k page */
ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
if (ret) {
- EDEB(4, "could'nt mmap physical=%lx", physical);
+ ehca_gen_err("could'nt mmap physical=%lx", physical);
return ret;
}
(*vma)->vm_flags |= VM_RESERVED;
vsize = (*vma)->vm_end - (*vma)->vm_start;
if (vsize != EHCA_PAGESIZE) {
- EDEB_ERR(4, "invalid vsize=%lx",
- (*vma)->vm_end - (*vma)->vm_start);
- ret = -EINVAL;
- return ret;
+ ehca_gen_err("invalid vsize=%lx",
+ (*vma)->vm_end - (*vma)->vm_start);
+ return -EINVAL;
}
(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
(*vma)->vm_flags |= VM_IO | VM_RESERVED;
- EDEB(6, "vsize=%lx physical=%lx", vsize, physical);
ret = remap_pfn_range((*vma), (*vma)->vm_start,
physical >> PAGE_SHIFT, vsize,
(*vma)->vm_page_prot);
if (ret) {
- EDEB_ERR(4, "remap_pfn_range() failed ret=%x", ret);
- ret = -ENOMEM;
+ ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+ return -ENOMEM;
}
- return ret;
+
+ return 0;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_if.c
linux-2.6/drivers/infiniband/hw/ehca/hcp_if.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_if.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/hcp_if.c 2006-08-30
20:00:17.000000000 +0200
@@ -41,13 +41,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "hcpi"
-
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
+#include "ipz_pt_fn.h"
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9,11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12,12)
@@ -112,12 +111,12 @@ static long ehca_hcall_7arg_7ret(unsigne
unsigned long *out6,
unsigned long *out7)
{
- long ret = H_SUCCESS;
+ long ret;
int i, sleep_msecs;
- EDEB_EN(7, "opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx
arg5=%lx"
- " arg6=%lx arg7=%lx", opcode, arg1, arg2, arg3, arg4,
arg5,
- arg6, arg7);
+ ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx
arg5=%lx "
+ "arg6=%lx arg7=%lx", opcode, arg1, arg2, arg3, arg4,
arg5,
+ arg6, arg7);
for (i = 0; i < 5; i++) {
ret = plpar_hcall_7arg_7ret(opcode,
@@ -133,26 +132,24 @@ static long ehca_hcall_7arg_7ret(unsigne
}
if (ret < H_SUCCESS)
- EDEB_ERR(4, "opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx"
- " out1=%lx out2=%lx out3=%lx out4=%lx"
- " out5=%lx out6=%lx out7=%lx",
- opcode, ret,
- arg1, arg2, arg3, arg4,
- arg5, arg6, arg7,
- *out1, *out2, *out3, *out4,
- *out5, *out6, *out7);
-
- EDEB_EX(7, "opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx
"
- "out4=%lx out5=%lx out6=%lx out7=%lx",
- opcode, ret, *out1, *out2, *out3, *out4, *out5,
- *out6, *out7);
+ ehca_gen_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx
arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx"
+ " out1=%lx out2=%lx out3=%lx
out4=%lx"
+ " out5=%lx out6=%lx out7=%lx",
+ opcode, ret,
+ arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7,
+ *out1, *out2, *out3, *out4,
+ *out5, *out6, *out7);
+
+ ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx
out3=%lx "
+ "out4=%lx out5=%lx out6=%lx out7=%lx",
+ opcode, ret, *out1, *out2, *out3, *out4,
*out5,
+ *out6, *out7);
return ret;
}
- EDEB_EX(7, "opcode=%lx ret=H_BUSY", opcode);
-
return H_BUSY;
}
@@ -176,14 +173,13 @@ static long ehca_hcall_9arg_9ret(unsigne
unsigned long *out8,
unsigned long *out9)
{
- long ret = H_SUCCESS;
+ long ret;
int i, sleep_msecs;
- EDEB_EN(7, "opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
- "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
- opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
- arg8, arg9);
-
+ ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+ "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
+ opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+ arg8, arg9);
for (i = 0; i < 5; i++) {
ret = plpar_hcall_9arg_9ret(opcode,
@@ -201,32 +197,32 @@ static long ehca_hcall_9arg_9ret(unsigne
}
if (ret < H_SUCCESS)
- EDEB_ERR(4, "opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
- " arg9=%lx"
- " out1=%lx out2=%lx out3=%lx out4=%lx"
- " out5=%lx out6=%lx out7=%lx out8=%lx"
- " out9=%lx",
- opcode, ret,
- arg1, arg2, arg3, arg4,
- arg5, arg6, arg7, arg8,
- arg9,
- *out1, *out2, *out3, *out4,
- *out5, *out6, *out7, *out8,
- *out9);
-
- EDEB_EX(7, "opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx
"
- "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx
out9=%lx",
- opcode, ret,*out1, *out2, *out3, *out4, *out5,
*out6,
- *out7, *out8, *out9);
+ ehca_gen_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx
arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx
arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx
out4=%lx"
+ " out5=%lx out6=%lx out7=%lx
out8=%lx"
+ " out9=%lx",
+ opcode, ret,
+ arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8,
+ arg9,
+ *out1, *out2, *out3, *out4,
+ *out5, *out6, *out7, *out8,
+ *out9);
+
+ ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx
out3=%lx "
+ "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx
"
+ "out9=%lx", opcode, ret,*out1, *out2, *out3,
*out4,
+ *out5, *out6, *out7, *out8, *out9);
return ret;
}
- EDEB_EX(7, "opcode=%lx ret=H_BUSY", opcode);
return H_BUSY;
}
+
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle
adapter_handle,
struct ehca_pfeq *pfeq,
const u32 neq_control,
@@ -236,18 +232,10 @@ u64 hipz_h_alloc_resource_eq(const struc
u32 * act_pages,
u32 * eq_ist)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 act_nr_of_entries_out = 0;
- u64 act_pages_out = 0;
- u64 eq_ist_out = 0;
- u64 allocate_controls = 0;
- u32 x = (u64)(&x);
-
- EDEB_EN(7, "pfeq=%p adapter_handle=%lx new_control=%x"
- " number_of_entries=%x",
- pfeq, adapter_handle.handle, neq_control,
- number_of_entries);
+ u64 allocate_controls;
+ u64 act_nr_of_entries_out, act_pages_out, eq_ist_out;
/* resource type */
allocate_controls = 3ULL;
@@ -276,10 +264,7 @@ u64 hipz_h_alloc_resource_eq(const struc
*eq_ist = (u32)eq_ist_out;
if (ret == H_NOT_ENOUGH_RESOURCES)
- EDEB_ERR(4, "Not enough resource - ret=%lx ", ret);
-
- EDEB_EX(7, "act_nr_of_entries=%x act_pages=%x eq_ist=%x",
- *act_nr_of_entries, *act_pages, *eq_ist);
+ ehca_gen_err("Not enough resource - ret=%lx ", ret);
return ret;
}
@@ -288,45 +273,30 @@ u64 hipz_h_reset_event(const struct ipz_
struct ipz_eq_handle eq_handle,
const u64 event_mask)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- EDEB_EN(7, "eq_handle=%lx, adapter_handle=%lx event_mask=%lx",
- eq_handle.handle, adapter_handle.handle, event_mask);
-
- ret = ehca_hcall_7arg_7ret(H_RESET_EVENTS,
- adapter_handle.handle, /* r4 */
- eq_handle.handle, /* r5 */
- event_mask, /* r6 */
- 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_RESET_EVENTS,
+ adapter_handle.handle, /* r4 */
+ eq_handle.handle, /* r5 */
+ event_mask, /* r6 */
+ 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle
adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 act_nr_of_entries_out;
- u64 act_pages_out;
- u64 g_la_privileged_out;
- u64 g_la_user_out;
-
- EDEB_EN(7, "Adapter_handle=%lx eq_handle=%lx cq_token=%x"
- " cq_number_of_entries=%x",
- adapter_handle.handle, param->eq_handle.handle,
- cq->token, param->nr_cqe);
+ u64 act_nr_of_entries_out, act_pages_out;
+ u64 g_la_privileged_out, g_la_user_out;
ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
adapter_handle.handle, /* r4 */
@@ -350,10 +320,7 @@ u64 hipz_h_alloc_resource_cq(const struc
hcp_galpas_ctor(&cq->galpas, g_la_privileged_out,
g_la_user_out);
if (ret == H_NOT_ENOUGH_RESOURCES)
- EDEB_ERR(4, "Not enough resources. ret=%lx", ret);
-
- EDEB_EX(7, "cq_handle=%lx act_nr_of_entries=%x act_pages=%x",
- cq->ipz_cq_handle.handle, param->act_nr_of_entries,
param->act_pages);
+ ehca_gen_err("Not enough resources. ret=%lx", ret);
return ret;
}
@@ -362,32 +329,13 @@ u64 hipz_h_alloc_resource_qp(const struc
struct ehca_qp *qp,
struct ehca_alloc_qp_parms *parms)
{
- u64 ret = H_SUCCESS;
- u64 allocate_controls;
- u64 max_r10_reg;
- u64 dummy = 0;
- u64 qp_nr_out = 0;
- u64 r6_out = 0;
- u64 r7_out = 0;
- u64 r8_out = 0;
- u64 g_la_user_out = 0;
- u64 r11_out = 0;
+ u64 ret;
+ u64 dummy, allocate_controls, max_r10_reg;
+ u64 qp_nr_out, r6_out, r7_out, r8_out, g_la_user_out, r11_out;
u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
int daqp_ctrl = parms->daqp_ctrl;
- EDEB_EN(7, "Adapter_handle=%lx servicetype=%x signalingtype=%x"
- " ud_av_l_key=%x send_cq_handle=%lx receive_cq_handle=%lx"
- " async_eq_handle=%lx qp_token=%x pd=%x
max_nr_send_wqes=%x"
- " max_nr_receive_wqes=%x max_nr_send_sges=%x"
- " max_nr_receive_sges=%x ud_av_l_key=%x galpa.pid=%x",
- adapter_handle.handle, parms->servicetype, parms->sigtype,
- parms->ud_av_l_key_ctl, qp->send_cq->ipz_cq_handle.handle,
- qp->recv_cq->ipz_cq_handle.handle,
parms->ipz_eq_handle.handle,
- qp->token, parms->pd.value, max_nr_send_wqes,
- max_nr_receive_wqes, parms->max_send_sge,
parms->max_recv_sge,
- parms->ud_av_l_key_ctl, qp->galpas.pid);
-
allocate_controls =
EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
(daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
@@ -453,17 +401,7 @@ u64 hipz_h_alloc_resource_qp(const struc
hcp_galpas_ctor(&qp->galpas, g_la_user_out,
g_la_user_out);
if (ret == H_NOT_ENOUGH_RESOURCES)
- EDEB_ERR(4, "Not enough resources. ret=%lx",ret);
-
- EDEB_EX(7, "qp_nr=%x act_nr_send_wqes=%x"
- " act_nr_receive_wqes=%x act_nr_send_sges=%x"
- " act_nr_receive_sges=%x nr_sq_pages=%x"
- " nr_rq_pages=%x galpa.user=%lx galpa.kernel=%lx",
- qp->real_qp_num, parms->act_nr_send_wqes,
- parms->act_nr_recv_wqes, parms->act_nr_send_sges,
- parms->act_nr_recv_sges, parms->nr_sq_pages,
- parms->nr_rq_pages, qp->galpas.user.fw_handle,
- qp->galpas.kernel.fw_handle);
+ ehca_gen_err("Not enough resources. ret=%lx",ret);
return ret;
}
@@ -472,20 +410,15 @@ u64 hipz_h_query_port(const struct ipz_a
const u8 port_id,
struct hipz_query_port *query_port_response_block)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 r_cb;
-
- EDEB_EN(7, "adapter_handle=%lx port_id %x",
- adapter_handle.handle, port_id);
+ u64 r_cb = virt_to_abs(query_port_response_block);
- if (((u64)query_port_response_block) & 0xfff) {
- EDEB_ERR(4, "response block not page aligned");
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("response block not page aligned");
return H_PARAMETER;
}
- r_cb = virt_to_abs(query_port_response_block);
-
ret = ehca_hcall_7arg_7ret(H_QUERY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
@@ -499,19 +432,8 @@ u64 hipz_h_query_port(const struct ipz_a
&dummy,
&dummy);
- EDEB_DMP(7, query_port_response_block, 64,
"query_port_response_block");
- EDEB(7, "offset31=%x offset35=%x offset36=%x",
- ((u32*)query_port_response_block)[32],
- ((u32*)query_port_response_block)[36],
- ((u32*)query_port_response_block)[37]);
- EDEB(7, "offset200=%x offset201=%x offset202=%x "
- "offset203=%x",
- ((u32*)query_port_response_block)[0x200],
- ((u32*)query_port_response_block)[0x201],
- ((u32*)query_port_response_block)[0x202],
- ((u32*)query_port_response_block)[0x203]);
-
- EDEB_EX(7, "ret=%lx", ret);
+ if (ehca_debug_level)
+ ehca_dmp(query_port_response_block, 64, "response_block");
return ret;
}
@@ -519,62 +441,26 @@ u64 hipz_h_query_port(const struct ipz_a
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- u64 r_cb;
- EDEB_EN(7, "adapter_handle=%lx", adapter_handle.handle);
+ u64 r_cb = virt_to_abs(query_hca_rblock);
- if (((u64)query_hca_rblock) & 0xfff) {
- EDEB_ERR(4, "response_block=%p not page aligned",
- query_hca_rblock);
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("response_block=%p not page aligned",
+ query_hca_rblock);
return H_PARAMETER;
}
- r_cb = virt_to_abs(query_hca_rblock);
-
- ret = ehca_hcall_7arg_7ret(H_QUERY_HCA,
- adapter_handle.handle, /* r4 */
- r_cb, /* r5 */
- 0, 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB(7, "offset0=%x offset1=%x offset2=%x offset3=%x",
- ((u32*)query_hca_rblock)[0],
- ((u32*)query_hca_rblock)[1],
- ((u32*)query_hca_rblock)[2], ((u32*)query_hca_rblock)[3]);
- EDEB(7, "offset4=%x offset5=%x offset6=%x offset7=%x",
- ((u32*)query_hca_rblock)[4],
- ((u32*)query_hca_rblock)[5],
- ((u32*)query_hca_rblock)[6], ((u32*)query_hca_rblock)[7]);
- EDEB(7, "offset8=%x offset9=%x offseta=%x offsetb=%x",
- ((u32*)query_hca_rblock)[8],
- ((u32*)query_hca_rblock)[9],
- ((u32*)query_hca_rblock)[10], ((u32*)query_hca_rblock)[11]);
- EDEB(7, "offsetc=%x offsetd=%x offsete=%x offsetf=%x",
- ((u32*)query_hca_rblock)[12],
- ((u32*)query_hca_rblock)[13],
- ((u32*)query_hca_rblock)[14], ((u32*)query_hca_rblock)[15]);
- EDEB(7, "offset136=%x offset192=%x offset204=%x",
- ((u32*)query_hca_rblock)[32],
- ((u32*)query_hca_rblock)[48], ((u32*)query_hca_rblock)[51]);
- EDEB(7, "offset231=%x offset235=%x",
- ((u32*)query_hca_rblock)[57], ((u32*)query_hca_rblock)[58]);
- EDEB(7, "offset200=%x offset201=%x offset202=%x offset203=%x",
- ((u32*)query_hca_rblock)[0x201],
- ((u32*)query_hca_rblock)[0x202],
- ((u32*)query_hca_rblock)[0x203],
- ((u32*)query_hca_rblock)[0x204]);
-
- EDEB_EX(7, "ret=%lx adapter_handle=%lx",
- ret, adapter_handle.handle);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_QUERY_HCA,
+ adapter_handle.handle, /* r4 */
+ r_cb, /* r5 */
+ 0, 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
@@ -584,32 +470,22 @@ u64 hipz_h_register_rpage(const struct i
const u64 logical_address_of_page,
u64 count)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- EDEB_EN(7, "adapter_handle=%lx pagesize=%x queue_type=%x"
- " resource_handle=%lx logical_address_of_page=%lx
count=%lx",
- adapter_handle.handle, pagesize, queue_type,
- resource_handle, logical_address_of_page, count);
-
- ret = ehca_hcall_7arg_7ret(H_REGISTER_RPAGES,
- adapter_handle.handle, /* r4 */
- queue_type | pagesize << 8, /* r5 */
- resource_handle, /* r6 */
- logical_address_of_page, /* r7 */
- count, /* r8 */
- 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_REGISTER_RPAGES,
+ adapter_handle.handle, /* r4 */
+ queue_type | pagesize << 8, /* r5 */
+ resource_handle, /* r6 */
+ logical_address_of_page, /* r7 */
+ count, /* r8 */
+ 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle
adapter_handle,
@@ -620,34 +496,22 @@ u64 hipz_h_register_rpage_eq(const struc
const u64 logical_address_of_page,
const u64 count)
{
- u64 ret = H_SUCCESS;
-
- EDEB_EN(7, "pfeq=%p adapter_handle=%lx eq_handle=%lx pagesize=%x"
- " queue_type=%x logical_address_of_page=%lx count=%lx",
- pfeq, adapter_handle.handle, eq_handle.handle, pagesize,
- queue_type,logical_address_of_page, count);
-
if (count != 1) {
- EDEB_ERR(4, "Ppage counter=%lx", count);
+ ehca_gen_err("Ppage counter=%lx", count);
return H_PARAMETER;
}
- ret = hipz_h_register_rpage(adapter_handle,
- pagesize,
- queue_type,
- eq_handle.handle,
- logical_address_of_page, count);
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return hipz_h_register_rpage(adapter_handle,
+ pagesize,
+ queue_type,
+ eq_handle.handle,
+ logical_address_of_page, count);
}
u32 hipz_h_query_int_state(const struct ipz_adapter_handle
adapter_handle,
u32 ist)
{
- u32 ret = H_SUCCESS;
- u64 dummy = 0;
-
- EDEB_EN(7, "ist=%x", ist);
+ u32 ret;
+ u64 dummy;
ret = ehca_hcall_7arg_7ret(H_QUERY_INT_STATE,
adapter_handle.handle, /* r4 */
@@ -662,9 +526,7 @@ u32 hipz_h_query_int_state(const struct
&dummy);
if (ret != H_SUCCESS && ret != H_BUSY)
- EDEB_ERR(4, "Could not query interrupt state.");
-
- EDEB_EX(7, "interrupt state: %x", ret);
+ ehca_gen_err("Could not query interrupt state.");
return ret;
}
@@ -678,24 +540,14 @@ u64 hipz_h_register_rpage_cq(const struc
const u64 count,
const struct h_galpa gal)
{
- u64 ret = H_SUCCESS;
-
- EDEB_EN(7, "pfcq=%p adapter_handle=%lx cq_handle=%lx pagesize=%x"
- " queue_type=%x logical_address_of_page=%lx count=%lx",
- pfcq, adapter_handle.handle, cq_handle.handle, pagesize,
- queue_type, logical_address_of_page, count);
-
if (count != 1) {
- EDEB_ERR(4, "Page counter=%lx", count);
+ ehca_gen_err("Page counter=%lx", count);
return H_PARAMETER;
}
- ret = hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
- cq_handle.handle,
logical_address_of_page,
- count);
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+ cq_handle.handle,
logical_address_of_page,
+ count);
}
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle
adapter_handle,
@@ -707,24 +559,14 @@ u64 hipz_h_register_rpage_qp(const struc
const u64 count,
const struct h_galpa galpa)
{
- u64 ret = H_SUCCESS;
-
- EDEB_EN(7, "pfqp=%p adapter_handle=%lx qp_handle=%lx pagesize=%x"
- " queue_type=%x logical_address_of_page=%lx count=%lx",
- pfqp, adapter_handle.handle, qp_handle.handle, pagesize,
- queue_type, logical_address_of_page, count);
-
if (count != 1) {
- EDEB_ERR(4, "Page counter=%lx", count);
+ ehca_gen_err("Page counter=%lx", count);
return H_PARAMETER;
}
- ret = hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
- qp_handle.handle,logical_address_of_page,
- count);
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
+ qp_handle.handle,logical_address_of_page,
+ count);
}
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle
adapter_handle,
@@ -734,36 +576,25 @@ u64 hipz_h_disable_and_get_wqe(const str
void **log_addr_next_rq_wqe2processed,
int dis_and_get_function_code)
{
- u64 ret = H_SUCCESS;
- u8 function_code = 1;
u64 dummy, dummy1, dummy2;
- EDEB_EN(7, "pfqp=%p adapter_handle=%lx function=%x qp_handle=%lx",
- pfqp, adapter_handle.handle, function_code,
qp_handle.handle);
-
if (!log_addr_next_sq_wqe2processed)
log_addr_next_sq_wqe2processed = (void**)&dummy1;
if (!log_addr_next_rq_wqe2processed)
log_addr_next_rq_wqe2processed = (void**)&dummy2;
- ret = ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
- adapter_handle.handle, /* r4 */
- dis_and_get_function_code, /* r5 */
- qp_handle.handle, /* r6 */
- 0, 0, 0, 0,
- (void*)log_addr_next_sq_wqe2processed,
- (void*)log_addr_next_rq_wqe2processed,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
- EDEB_EX(7, "ret=%lx ladr_next_rq_wqe_out=%p"
- " ladr_next_sq_wqe_out=%p", ret,
- *log_addr_next_sq_wqe2processed,
- *log_addr_next_rq_wqe2processed);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
+ adapter_handle.handle, /* r4 */
+ dis_and_get_function_code, /* r5 */
+ qp_handle.handle, /* r6 */
+ 0, 0, 0, 0,
+ (void*)log_addr_next_sq_wqe2processed,
+ (void*)log_addr_next_rq_wqe2processed,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
@@ -773,22 +604,15 @@ u64 hipz_h_modify_qp(const struct ipz_ad
struct hcp_modify_qp_control_block *mqpcb,
struct h_galpa gal)
{
- u64 ret = H_SUCCESS;
- u64 invalid_attribute_identifier = 0;
- u64 rc_attrib_mask = 0;
- u64 dummy;
- u64 r_cb;
- EDEB_EN(7, "pfqp=%p adapter_handle=%lx qp_handle=%lx"
- " update_mask=%lx qp_state=%x mqpcb=%p",
- pfqp, adapter_handle.handle, qp_handle.handle,
- update_mask, mqpcb->qp_state, mqpcb);
+ u64 ret;
+ u64 dummy;
+ u64 invalid_attribute_identifier, rc_attrib_mask;
- r_cb = virt_to_abs(mqpcb);
ret = ehca_hcall_7arg_7ret(H_MODIFY_QP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
update_mask, /* r6 */
- r_cb, /* r7 */
+ virt_to_abs(mqpcb), /* r7 */
0, 0, 0,
&invalid_attribute_identifier, /* r4 */
&dummy, /* r5 */
@@ -797,12 +621,9 @@ u64 hipz_h_modify_qp(const struct ipz_ad
&dummy, /* r8 */
&rc_attrib_mask, /* r9 */
&dummy);
- if (ret == H_NOT_ENOUGH_RESOURCES)
- EDEB_ERR(4, "Insufficient resources ret=%lx", ret);
- EDEB_EX(7, "ret=%lx invalid_attribute_identifier=%lx"
- " invalid_attribute_MASK=%lx", ret,
- invalid_attribute_identifier, rc_attrib_mask);
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Insufficient resources ret=%lx", ret);
return ret;
}
@@ -813,47 +634,32 @@ u64 hipz_h_query_qp(const struct ipz_ada
struct hcp_modify_qp_control_block *qqpcb,
struct h_galpa gal)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- u64 r_cb;
- EDEB_EN(7, "adapter_handle=%lx qp_handle=%lx",
- adapter_handle.handle, qp_handle.handle);
-
- r_cb = virt_to_abs(qqpcb);
- EDEB(7, "r_cb=%lx", r_cb);
-
- ret = ehca_hcall_7arg_7ret(H_QUERY_QP,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- r_cb, /* r6 */
- 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB_EX(7, "ret=%lx", ret);
- return ret;
+ return ehca_hcall_7arg_7ret(H_QUERY_QP,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ virt_to_abs(qqpcb), /* r6 */
+ 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 ladr_next_sq_wqe_out;
- u64 ladr_next_rq_wqe_out;
-
- EDEB_EN(7, "qp=%p ipz_qp_handle=%lx adapter_handle=%lx",
- qp, qp->ipz_qp_handle.handle, adapter_handle.handle);
+ u64 ladr_next_sq_wqe_out, ladr_next_rq_wqe_out;
ret = hcp_galpas_dtor(&qp->galpas);
if (ret) {
- EDEB_ERR(4, "Could not destruct qp->galpas");
+ ehca_gen_err("Could not destruct qp->galpas");
return H_RESOURCE;
}
ret = ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
@@ -870,7 +676,7 @@ u64 hipz_h_destroy_qp(const struct ipz_a
&dummy,
&dummy);
if (ret == H_HARDWARE)
- EDEB_ERR(4, "HCA not operational. ret=%lx", ret);
+ ehca_gen_err("HCA not operational. ret=%lx", ret);
ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
@@ -885,9 +691,7 @@ u64 hipz_h_destroy_qp(const struct ipz_a
&dummy);
if (ret == H_RESOURCE)
- EDEB_ERR(4, "Resource still in use. ret=%lx", ret);
-
- EDEB_EX(7, "ret=%lx", ret);
+ ehca_gen_err("Resource still in use. ret=%lx", ret);
return ret;
}
@@ -897,28 +701,20 @@ u64 hipz_h_define_aqp0(const struct ipz_
struct h_galpa gal,
u32 port)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- EDEB_EN(7, "port=%x ipz_qp_handle=%lx adapter_handle=%lx",
- port, qp_handle.handle, adapter_handle.handle);
-
- ret = ehca_hcall_7arg_7ret(H_DEFINE_AQP0,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- port, /* r6 */
- 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_DEFINE_AQP0,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ port, /* r6 */
+ 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
@@ -927,13 +723,9 @@ u64 hipz_h_define_aqp1(const struct ipz_
u32 port, u32 * pma_qp_nr,
u32 * bma_qp_nr)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 pma_qp_nr_out;
- u64 bma_qp_nr_out;
-
- EDEB_EN(7, "port=%x qp_handle=%lx adapter_handle=%lx",
- port, qp_handle.handle, adapter_handle.handle);
+ u64 pma_qp_nr_out, bma_qp_nr_out;
ret = ehca_hcall_7arg_7ret(H_DEFINE_AQP1,
adapter_handle.handle, /* r4 */
@@ -952,10 +744,7 @@ u64 hipz_h_define_aqp1(const struct ipz_
*bma_qp_nr = (u32)bma_qp_nr_out;
if (ret == H_ALIAS_EXIST)
- EDEB_ERR(4, "AQP1 already exists. ret=%lx", ret);
-
- EDEB_EX(7, "ret=%lx pma_qp_nr=%i bma_qp_nr=%i",
- ret, (int)*pma_qp_nr, (int)*bma_qp_nr);
+ ehca_gen_err("AQP1 already exists. ret=%lx", ret);
return ret;
}
@@ -966,23 +755,8 @@ u64 hipz_h_attach_mcqp(const struct ipz_
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u8 *dgid_sp = (u8*)&subnet_prefix;
- u8 *dgid_ii = (u8*)&interface_id;
-
- EDEB_EN(7, "qp_handle=%lx adapter_handle=%lx\nMCG_DGID ="
- " %d.%d.%d.%d.%d.%d.%d.%d."
- " %d.%d.%d.%d.%d.%d.%d.%d",
- qp_handle.handle, adapter_handle.handle,
- dgid_sp[0], dgid_sp[1],
- dgid_sp[2], dgid_sp[3],
- dgid_sp[4], dgid_sp[5],
- dgid_sp[6], dgid_sp[7],
- dgid_ii[0], dgid_ii[1],
- dgid_ii[2], dgid_ii[3],
- dgid_ii[4], dgid_ii[5],
- dgid_ii[6], dgid_ii[7]);
ret = ehca_hcall_7arg_7ret(H_ATTACH_MCQP,
adapter_handle.handle, /* r4 */
@@ -1000,9 +774,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_
&dummy);
if (ret == H_NOT_ENOUGH_RESOURCES)
- EDEB_ERR(4, "Not enough resources. ret=%lx", ret);
-
- EDEB_EX(7, "ret=%lx", ret);
+ ehca_gen_err("Not enough resources. ret=%lx", ret);
return ret;
}
@@ -1013,56 +785,34 @@ u64 hipz_h_detach_mcqp(const struct ipz_
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- u8 *dgid_sp = (u8*)&subnet_prefix;
- u8 *dgid_ii = (u8*)&interface_id;
- EDEB_EN(7, "qp_handle=%lx adapter_handle=%lx\nMCG_DGID ="
- " %d.%d.%d.%d.%d.%d.%d.%d."
- " %d.%d.%d.%d.%d.%d.%d.%d",
- qp_handle.handle, adapter_handle.handle,
- dgid_sp[0], dgid_sp[1],
- dgid_sp[2], dgid_sp[3],
- dgid_sp[4], dgid_sp[5],
- dgid_sp[6], dgid_sp[7],
- dgid_ii[0], dgid_ii[1],
- dgid_ii[2], dgid_ii[3],
- dgid_ii[4], dgid_ii[5],
- dgid_ii[6], dgid_ii[7]);
- ret = ehca_hcall_7arg_7ret(H_DETACH_MCQP,
- adapter_handle.handle, /* r4 */
- qp_handle.handle, /* r5 */
- mcg_dlid, /* r6 */
- interface_id, /* r7 */
- subnet_prefix, /* r8 */
- 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_DETACH_MCQP,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ mcg_dlid, /* r6 */
+ interface_id, /* r7 */
+ subnet_prefix, /* r8 */
+ 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
u8 force_flag)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- EDEB_EN(7, "cq->pf=%p cq=.%p ipz_cq_handle=%lx
adapter_handle=%lx",
- &cq->pf, cq, cq->ipz_cq_handle.handle,
adapter_handle.handle);
-
ret = hcp_galpas_dtor(&cq->galpas);
if (ret) {
- EDEB_ERR(4, "Could not destruct cp->galpas");
+ ehca_gen_err("Could not destruct cp->galpas");
return H_RESOURCE;
}
@@ -1080,9 +830,7 @@ u64 hipz_h_destroy_cq(const struct ipz_a
&dummy);
if (ret == H_RESOURCE)
- EDEB(4, "ret=%lx ", ret);
-
- EDEB_EX(7, "ret=%lx", ret);
+ ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
return ret;
}
@@ -1090,16 +838,12 @@ u64 hipz_h_destroy_cq(const struct ipz_a
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_eq *eq)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- EDEB_EN(7, "eq->pf=%p eq=%p ipz_eq_handle=%lx adapter_handle=%lx",
- &eq->pf, eq, eq->ipz_eq_handle.handle,
- adapter_handle.handle);
-
ret = hcp_galpas_dtor(&eq->galpas);
if (ret) {
- EDEB_ERR(4, "Could not destruct eq->galpas");
+ ehca_gen_err("Could not destruct eq->galpas");
return H_RESOURCE;
}
@@ -1117,9 +861,7 @@ u64 hipz_h_destroy_eq(const struct ipz_a
if (ret == H_RESOURCE)
- EDEB_ERR(4, "Resource in use. ret=%lx ", ret);
-
- EDEB_EX(7, "ret=%lx", ret);
+ ehca_gen_err("Resource in use. ret=%lx ", ret);
return ret;
}
@@ -1132,16 +874,11 @@ u64 hipz_h_alloc_resource_mr(const struc
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
u64 lkey_out;
u64 rkey_out;
- EDEB_EN(7, "adapter_handle=%lx mr=%p vaddr=%lx length=%lx"
- " access_ctrl=%x pd=%x",
- adapter_handle.handle, mr, vaddr, length, access_ctrl,
- pd.value);
-
ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
adapter_handle.handle, /* r4
*/
5, /* r5
*/
@@ -1160,9 +897,6 @@ u64 hipz_h_alloc_resource_mr(const struc
outparms->lkey = (u32)lkey_out;
outparms->rkey = (u32)rkey_out;
- EDEB_EX(7, "ret=%lx mr_handle=%lx lkey=%x rkey=%x",
- ret, outparms->handle.handle, outparms->lkey,
outparms->rkey);
-
return ret;
}
@@ -1173,27 +907,22 @@ u64 hipz_h_register_rpage_mr(const struc
const u64 logical_address_of_page,
const u64 count)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
- EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx pagesize=%x"
- " queue_type=%x logical_address_of_page=%lx count=%lx",
- adapter_handle.handle, mr, mr->ipz_mr_handle.handle,
pagesize,
- queue_type, logical_address_of_page, count);
-
- if ((count > 1) && (logical_address_of_page & 0xfff)) {
- EDEB_ERR(4, "logical_address_of_page not on a 4k boundary
"
- "adapter_handle=%lx mr=%p mr_handle=%lx "
- "pagesize=%x queue_type=%x
logical_address_of_page=%lx"
- " count=%lx",
- adapter_handle.handle, mr,
mr->ipz_mr_handle.handle,
- pagesize, queue_type, logical_address_of_page,
count);
+ if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1)))
{
+ ehca_gen_err("logical_address_of_page not on a 4k boundary
"
+ "adapter_handle=%lx mr=%p mr_handle=%lx "
+ "pagesize=%x queue_type=%x "
+ "logical_address_of_page=%lx count=%lx",
+ adapter_handle.handle, mr,
+ mr->ipz_mr_handle.handle, pagesize,
queue_type,
+ logical_address_of_page, count);
ret = H_PARAMETER;
} else
ret = hipz_h_register_rpage(adapter_handle, pagesize,
queue_type,
mr->ipz_mr_handle.handle,
logical_address_of_page,
count);
- EDEB_EX(7, "ret=%lx", ret);
return ret;
}
@@ -1202,15 +931,9 @@ u64 hipz_h_query_mr(const struct ipz_ada
const struct ehca_mr *mr,
struct ehca_mr_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 remote_len_out;
- u64 remote_vaddr_out;
- u64 acc_ctrl_pd_out;
- u64 r9_out;
-
- EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx",
- adapter_handle.handle, mr, mr->ipz_mr_handle.handle);
+ u64 remote_len_out, remote_vaddr_out, acc_ctrl_pd_out, r9_out;
ret = ehca_hcall_7arg_7ret(H_QUERY_MR,
adapter_handle.handle, /* r4 */
@@ -1228,38 +951,25 @@ u64 hipz_h_query_mr(const struct ipz_ada
outparms->lkey = (u32)(r9_out >> 32);
outparms->rkey = (u32)(r9_out & (0xffffffff));
- EDEB_EX(7, "ret=%lx mr_local_length=%lx mr_local_vaddr=%lx "
- "mr_remote_length=%lx mr_remote_vaddr=%lx access_ctrl=%x "
- "pd=%x lkey=%x rkey=%x", ret, outparms->len,
- outparms->vaddr, remote_len_out, remote_vaddr_out,
- outparms->acl, outparms->acl, outparms->lkey,
outparms->rkey);
-
return ret;
}
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle
adapter_handle,
const struct ehca_mr *mr)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx",
- adapter_handle.handle, mr, mr->ipz_mr_handle.handle);
-
- ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- mr->ipz_mr_handle.handle, /* r5 */
- 0, 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ mr->ipz_mr_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
@@ -1271,15 +981,9 @@ u64 hipz_h_reregister_pmr(const struct i
const u64 mr_addr_cb,
struct ehca_mr_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 lkey_out;
- u64 rkey_out;
-
- EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx vaddr_in=%lx "
- "length=%lx access_ctrl=%x pd=%x mr_addr_cb=%lx",
- adapter_handle.handle, mr, mr->ipz_mr_handle.handle,
vaddr_in,
- length, access_ctrl, pd.value, mr_addr_cb);
+ u64 lkey_out, rkey_out;
ret = ehca_hcall_7arg_7ret(H_REREGISTER_PMR,
adapter_handle.handle, /* r4 */
@@ -1301,8 +1005,6 @@ u64 hipz_h_reregister_pmr(const struct i
outparms->lkey = (u32)lkey_out;
outparms->rkey = (u32)rkey_out;
- EDEB_EX(7, "ret=%lx vaddr=%lx lkey=%x rkey=%x",
- ret, outparms->vaddr, outparms->lkey, outparms->rkey);
return ret;
}
@@ -1314,16 +1016,9 @@ u64 hipz_h_register_smr(const struct ipz
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 lkey_out;
- u64 rkey_out;
-
- EDEB_EN(7, "adapter_handle=%lx orig_mr=%p orig_mr_handle=%lx "
- "vaddr_in=%lx access_ctrl=%x pd=%x", adapter_handle.handle,
- orig_mr, orig_mr->ipz_mr_handle.handle, vaddr_in, access_ctrl,
- pd.value);
-
+ u64 lkey_out, rkey_out;
ret = ehca_hcall_7arg_7ret(H_REGISTER_SMR,
 adapter_handle.handle, /* r4 */
@@ -1342,9 +1037,6 @@ u64 hipz_h_register_smr(const struct ipz
outparms->lkey = (u32)lkey_out;
outparms->rkey = (u32)rkey_out;
- EDEB_EX(7, "ret=%lx mr_handle=%lx lkey=%x rkey=%x",
- ret, outparms->handle.handle, outparms->lkey, outparms->rkey);
-
return ret;
}
@@ -1353,13 +1045,10 @@ u64 hipz_h_alloc_resource_mw(const struc
const struct ipz_pd pd,
struct ehca_mw_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
u64 rkey_out;
- EDEB_EN(7, "adapter_handle=%lx mw=%p pd=%x",
- adapter_handle.handle, mw, pd.value);
-
ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
adapter_handle.handle, /* r4 */
6, /* r5 */
@@ -1375,8 +1064,6 @@ u64 hipz_h_alloc_resource_mw(const struc
outparms->rkey = (u32)rkey_out;
- EDEB_EX(7, "ret=%lx mw_handle=%lx rkey=%x",
- ret, outparms->handle.handle, outparms->rkey);
return ret;
}
@@ -1384,13 +1071,9 @@ u64 hipz_h_query_mw(const struct ipz_ada
const struct ehca_mw *mw,
struct ehca_mw_hipzout_parms *outparms)
{
- u64 ret = H_SUCCESS;
+ u64 ret;
u64 dummy;
- u64 pd_out;
- u64 rkey_out;
-
- EDEB_EN(7, "adapter_handle=%lx mw=%p mw_handle=%lx",
- adapter_handle.handle, mw, mw->ipz_mw_handle.handle);
+ u64 pd_out, rkey_out;
ret = ehca_hcall_7arg_7ret(H_QUERY_MW,
adapter_handle.handle, /* r4 */
@@ -1405,34 +1088,25 @@ u64 hipz_h_query_mw(const struct ipz_ada
&dummy);
outparms->rkey = (u32)rkey_out;
- EDEB_EX(7, "ret=%lx rkey=%x pd=%lx", ret, outparms->rkey, pd_out);
-
return ret;
}
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle
adapter_handle,
const struct ehca_mw *mw)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- EDEB_EN(7, "adapter_handle=%lx mw=%p mw_handle=%lx",
- adapter_handle.handle, mw, mw->ipz_mw_handle.handle);
-
- ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
- adapter_handle.handle, /* r4 */
- mw->ipz_mw_handle.handle, /* r5 */
- 0, 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ mw->ipz_mw_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
@@ -1440,34 +1114,24 @@ u64 hipz_h_error_data(const struct ipz_a
void *rblock,
unsigned long *byte_count)
{
- u64 ret = H_SUCCESS;
u64 dummy;
- u64 r_cb;
-
- EDEB_EN(7, "adapter_handle=%lx ressource_handle=%lx rblock=%p",
- adapter_handle.handle, ressource_handle, rblock);
+ u64 r_cb = virt_to_abs(rblock);
- if (((u64)rblock) & 0xfff) {
- EDEB_ERR(4, "rblock not page aligned.");
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("rblock not page aligned.");
return H_PARAMETER;
}
- r_cb = virt_to_abs(rblock);
-
- ret = ehca_hcall_7arg_7ret(H_ERROR_DATA,
- adapter_handle.handle,
- ressource_handle,
- r_cb,
- 0, 0, 0, 0,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy,
- &dummy);
-
- EDEB_EX(7, "ret=%lx", ret);
-
- return ret;
+ return ehca_hcall_7arg_7ret(H_ERROR_DATA,
+ adapter_handle.handle,
+ ressource_handle,
+ r_cb,
+ 0, 0, 0, 0,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+ &dummy);
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_phyp.c
linux-2.6/drivers/infiniband/hw/ehca/hcp_phyp.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_phyp.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/hcp_phyp.c 2006-08-30
20:00:16.000000000 +0200
@@ -39,22 +39,17 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "PHYP"
-
#include "ehca_classes.h"
#include "hipz_hw.h"
int hcall_map_page(u64 physaddr, u64 *mapaddr)
{
*mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
-
- EDEB(7, "ioremap physaddr=%lx mapaddr=%lx", physaddr, *mapaddr);
return 0;
}
int hcall_unmap_page(u64 mapaddr)
{
- EDEB(7, "mapaddr=%lx", mapaddr);
iounmap((volatile void __iomem*)mapaddr);
return 0;
}
@@ -68,25 +63,18 @@ int hcp_galpas_ctor(struct h_galpas *gal
galpas->user.fw_handle = paddr_user;
- EDEB(7, "paddr_kernel=%lx paddr_user=%lx galpas->kernel=%lx"
- " galpas->user=%lx",
- paddr_kernel, paddr_user, galpas->kernel.fw_handle,
- galpas->user.fw_handle);
-
- return ret;
+ return 0;
}
int hcp_galpas_dtor(struct h_galpas *galpas)
{
- int ret = 0;
-
- if (galpas->kernel.fw_handle)
- ret = hcall_unmap_page(galpas->kernel.fw_handle);
-
- if (ret)
- return ret;
+ if (galpas->kernel.fw_handle) {
+ int ret = hcall_unmap_page(galpas->kernel.fw_handle);
+ if (ret)
+ return ret;
+ }
galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
- return ret;
+ return 0;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_phyp.h
linux-2.6/drivers/infiniband/hw/ehca/hcp_phyp.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/hcp_phyp.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/hcp_phyp.h 2006-08-30
20:00:16.000000000 +0200
@@ -69,19 +69,13 @@ struct h_galpas {
static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
{
u64 addr = galpa.fw_handle + offset;
- u64 out;
- EDEB_EN(7, "addr=%lx offset=%x ", addr, offset);
- out = *(u64 *) addr;
- EDEB_EX(7, "addr=%lx value=%lx", addr, out);
- return out;
+ return *(volatile u64 __force *)addr;
}
static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64
value)
{
u64 addr = galpa.fw_handle + offset;
- EDEB(7, "addr=%lx offset=%x value=%lx", addr,
- offset, value);
- *(u64 *) addr = value;
+ *(volatile u64 __force *)addr = value;
}
int hcp_galpas_ctor(struct h_galpas *galpas,
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/hipz_fns_core.h
linux-2.6/drivers/infiniband/hw/ehca/hipz_fns_core.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/hipz_fns_core.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/hipz_fns_core.h 2006-08-30
20:00:16.000000000 +0200
@@ -60,63 +60,41 @@
static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
{
- struct h_galpa gal;
-
- EDEB_EN(7, "qp=%p", qp);
- gal = qp->galpas.kernel;
/* ringing doorbell :-) */
- hipz_galpa_store_qp(gal, qpx_sqa, EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
- EDEB_EX(7, "qp=%p QPx_SQA = %i", qp, nr_wqes);
+ hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
+ EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
}
static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
{
- struct h_galpa gal;
-
- EDEB_EN(7, "qp=%p", qp);
- gal = qp->galpas.kernel;
/* ringing doorbell :-) */
- hipz_galpa_store_qp(gal, qpx_rqa, EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
- EDEB_EX(7, "qp=%p QPx_RQA = %i", qp, nr_wqes);
+ hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
+ EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
}
static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
{
- struct h_galpa gal;
-
- EDEB_EN(7, "cq=%p", cq);
- gal = cq->galpas.kernel;
- hipz_galpa_store_cq(gal, cqx_feca,
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
- EDEB_EX(7, "cq=%p CQx_FECA = %i", cq, nr_cqes);
}
static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
{
- struct h_galpa gal;
- u64 CQx_N0_reg = 0;
+ u64 cqx_n0_reg;
- EDEB_EN(7, "cq=%p event on solicited completion -- write CQx_N0", cq);
- gal = cq->galpas.kernel;
- hipz_galpa_store_cq(gal, cqx_n0,
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
value));
- CQx_N0_reg = hipz_galpa_load_cq(gal, cqx_n0);
- EDEB_EX(7, "cq=%p loaded CQx_N0=%lx", cq, (unsigned
long)CQx_N0_reg);
+ cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
}
static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
{
- struct h_galpa gal;
- u64 CQx_N1_reg = 0;
+ u64 cqx_n1_reg;
- EDEB_EN(7, "cq=%p event on completion -- write CQx_N1",
- cq);
- gal = cq->galpas.kernel;
- hipz_galpa_store_cq(gal, cqx_n1,
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT,
value));
- CQx_N1_reg = hipz_galpa_load_cq(gal, cqx_n1);
- EDEB_EX(7, "cq=%p loaded CQx_N1=%lx", cq, (unsigned long)CQx_N1_reg);
+ cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
}
#endif /* __HIPZ_FNC_CORE_H__ */
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ipz_pt_fn.c
linux-2.6/drivers/infiniband/hw/ehca/ipz_pt_fn.c
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ipz_pt_fn.c 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ipz_pt_fn.c 2006-08-30
20:00:16.000000000 +0200
@@ -38,13 +38,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define DEB_PREFIX "iptz"
-
#include "ehca_tools.h"
#include "ipz_pt_fn.h"
-extern int ehca_hwlevel;
-
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
@@ -54,10 +50,9 @@ void *ipz_qpageit_get_inc(struct ipz_que
ret = NULL;
}
if (((u64)ret) % EHCA_PAGESIZE) {
- EDEB(4, "ERROR!! not at PAGE-Boundary");
+ ehca_gen_err("ERROR!! not at PAGE-Boundary");
return NULL;
}
- EDEB(7, "queue=%p ret=%p", queue, ret);
return ret;
}
@@ -65,15 +60,13 @@ void *ipz_qeit_eq_get_inc(struct ipz_que
{
void *ret = ipz_qeit_get(queue);
u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
queue->current_q_offset += queue->qe_size;
if (queue->current_q_offset > last_entry_in_q) {
queue->current_q_offset = 0;
queue->toggle_state = (~queue->toggle_state) & 1;
}
- EDEB(7, "queue=%p ret=%p new current_q_offset=%lx qe_size=%x",
- queue, ret, queue->current_q_offset, queue->qe_size);
-
return ret;
}
@@ -84,22 +77,20 @@ int ipz_queue_ctor(struct ipz_queue *que
int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
int f;
- EDEB_EN(7, "nr_of_pages=%x pagesize=%x qe_size=%x pages_per_kpage=%x",
- nr_of_pages, pagesize, qe_size, pages_per_kpage);
if (pagesize > PAGE_SIZE) {
- EDEB_ERR(4, "FATAL ERROR: pagesize=%x is greater than "
- "kernel page size", pagesize);
+ ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
+ "than kernel page size", pagesize);
return 0;
}
if (!pages_per_kpage) {
- EDEB_ERR(4, "FATAL ERROR: invalid kernel page size. "
- "pages_per_kpage=%x", pages_per_kpage);
+ ehca_gen_err("FATAL ERROR: invalid kernel page size. "
+ "pages_per_kpage=%x", pages_per_kpage);
return 0;
}
queue->queue_length = nr_of_pages * pagesize;
queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
if (!queue->queue_pages) {
- EDEB(4, "ERROR!! didn't get the memory");
+ ehca_gen_err("ERROR!! didn't get the memory");
return 0;
}
memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
@@ -126,14 +117,11 @@ int ipz_queue_ctor(struct ipz_queue *que
queue->act_nr_of_sg = nr_of_sg;
queue->pagesize = pagesize;
queue->toggle_state = 1;
- EDEB_EX(7, "queue_length=%x queue_pages=%p qe_size=%x"
- " act_nr_of_sg=%x", queue->queue_length,
queue->queue_pages,
- queue->qe_size, queue->act_nr_of_sg);
return 1;
ipz_queue_ctor_exit0:
- EDEB_ERR(4, "Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
- queue, f, nr_of_pages);
+ ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
+ queue, f, nr_of_pages);
for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
if (!(queue->queue_pages)[f])
break;
@@ -148,19 +136,14 @@ int ipz_queue_dtor(struct ipz_queue *que
int g;
int nr_pages;
- EDEB_EN(7, "ipz_queue pointer=%p", queue);
if (!queue || !queue->queue_pages) {
- EDEB_ERR(4, "queue or queue_pages is NULL");
+ ehca_gen_dbg("queue or queue_pages is NULL");
return 0;
}
- EDEB(7, "destructing a queue with the following "
- "properties:\n nr_of_pages=%x pagesize=%x qe_size=%x",
- queue->act_nr_of_sg, queue->pagesize, queue->qe_size);
nr_pages = queue->queue_length / queue->pagesize;
for (g = 0; g < nr_pages; g += pages_per_kpage)
free_page((unsigned long)(queue->queue_pages)[g]);
vfree(queue->queue_pages);
- EDEB_EX(7, "queue freed!");
return 1;
}
diff -Nurp linux-2.6_orig/drivers/infiniband/hw/ehca/ipz_pt_fn.h
linux-2.6/drivers/infiniband/hw/ehca/ipz_pt_fn.h
--- linux-2.6_orig/drivers/infiniband/hw/ehca/ipz_pt_fn.h 2006-09-08
00:16:13.000000000 +0200
+++ linux-2.6/drivers/infiniband/hw/ehca/ipz_pt_fn.h 2006-08-30
20:00:17.000000000 +0200
@@ -43,7 +43,6 @@
#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__
-#include "ehca_qes.h"
#define EHCA_PAGESHIFT 12
#define EHCA_PAGESIZE 4096UL
#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
@@ -76,7 +75,7 @@ struct ipz_queue {
*/
static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
{
- struct ipz_page *current_page = NULL;
+ struct ipz_page *current_page;
if (q_offset >= queue->queue_length)
return NULL;
current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
@@ -118,9 +117,6 @@ static inline void *ipz_qeit_get_inc(str
queue->toggle_state = (~queue->toggle_state) & 1;
}
- EDEB(7, "queue=%p ret=%p new current_q_addr=%lx qe_size=%x",
- queue, ret, queue->current_q_offset, queue->qe_size);
-
return ret;
}
@@ -230,7 +226,6 @@ static inline void *ipz_eqit_eq_get_inc_
{
void *ret = ipz_qeit_get(queue);
u32 qe = *(u8 *) ret;
- EDEB(7, "ipz_QEit_EQ_get_inc_valid qe=%x", qe);
if ((qe >> 7) == (queue->toggle_state & 1))
ipz_qeit_eq_get_inc(queue); /* this is a good one */
else
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20060907/b9a6f0ba/attachment.html>
-------------- next part --------------
A non-text attachment was scrubbed...
Name: smime.p7s
Type: application/pkcs7-signature
Size: 5203 bytes
Desc: S/MIME Cryptographic Signature
URL: <http://lists.openfabrics.org/pipermail/general/attachments/20060907/b9a6f0ba/attachment.bin>
More information about the general
mailing list