[ewg] [PATCH] ehca: backports for 2.6.16-sles10-sp2
Stefan Roscher
ossrosch at linux.vnet.ibm.com
Thu Oct 1 06:04:13 PDT 2009
Hi Vlad,
please apply the following ehca backports for 2.6.16-sles10-sp2. Thanks!
Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
---
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-010-revert_dmem.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-010-revert_dmem.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-010-revert_dmem.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-010-revert_dmem.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,721 @@
+diff -Nurp ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_main.c ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c
+--- ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:18:09.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-23 15:08:25.000000000 +0200
+@@ -506,7 +506,6 @@ static int ehca_init_device(struct ehca_
+ shca->ib_device.detach_mcast = ehca_detach_mcast;
+ shca->ib_device.process_mad = ehca_process_mad;
+ shca->ib_device.mmap = ehca_mmap;
+- shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
+
+ if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+ shca->ib_device.uverbs_cmd_mask |=
+@@ -1029,23 +1028,17 @@ static int __init ehca_module_init(void)
+ goto module_init1;
+ }
+
+- ret = ehca_create_busmap();
+- if (ret) {
+- ehca_gen_err("Cannot create busmap.");
+- goto module_init2;
+- }
+-
+ ret = ibmebus_register_driver(&ehca_driver);
+ if (ret) {
+ ehca_gen_err("Cannot register eHCA device driver");
+ ret = -EINVAL;
+- goto module_init3;
++ goto module_init2;
+ }
+
+ ret = register_memory_notifier(&ehca_mem_nb);
+ if (ret) {
+ ehca_gen_err("Failed registering memory add/remove notifier");
+- goto module_init4;
++ goto module_init3;
+ }
+
+ if (ehca_poll_all_eqs != 1) {
+@@ -1060,11 +1053,8 @@ static int __init ehca_module_init(void)
+
+ return 0;
+
+-module_init4:
+- ibmebus_unregister_driver(&ehca_driver);
+-
+ module_init3:
+- ehca_destroy_busmap();
++ ibmebus_unregister_driver(&ehca_driver);
+
+ module_init2:
+ ehca_destroy_slab_caches();
+@@ -1083,8 +1073,6 @@ static void __exit ehca_module_exit(void
+
+ unregister_memory_notifier(&ehca_mem_nb);
+
+- ehca_destroy_busmap();
+-
+ ehca_destroy_slab_caches();
+
+ ehca_destroy_comp_pool();
+diff -Nurp ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_mrmw.c ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.c
+--- ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_mrmw.c 2009-09-25 12:18:09.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.c 2009-09-23 15:08:25.000000000 +0200
+@@ -53,38 +53,6 @@
+ /* max number of rpages (per hcall register_rpages) */
+ #define MAX_RPAGES 512
+
+-/* DMEM toleration management */
+-#define EHCA_SECTSHIFT SECTION_SIZE_BITS
+-#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
+-#define EHCA_HUGEPAGESHIFT 34
+-#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
+-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+-#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
+-#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
+-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+-#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
+-#define EHCA_DIR_MAP_SIZE (0x10000)
+-#define EHCA_ENT_MAP_SIZE (0x10000)
+-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+-
+-static unsigned long ehca_mr_len;
+-
+-/*
+- * Memory map data structures
+- */
+-struct ehca_dir_bmap {
+- u64 ent[EHCA_MAP_ENTRIES];
+-};
+-struct ehca_top_bmap {
+- struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+-};
+-struct ehca_bmap {
+- struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+-};
+-
+-static struct ehca_bmap *ehca_bmap;
+-
+ static struct kmem_cache *mr_cache;
+ static struct kmem_cache *mw_cache;
+
+@@ -100,8 +68,6 @@ enum ehca_mr_pgsize {
+ #define EHCA_MR_PGSHIFT1M 20
+ #define EHCA_MR_PGSHIFT16M 24
+
+-static u64 ehca_map_vaddr(void *caddr);
+-
+ static u32 ehca_encode_hwpage_size(u32 pgsize)
+ {
+ int log = ilog2(pgsize);
+@@ -169,8 +135,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_
+ goto get_dma_mr_exit0;
+ }
+
+- ret = ehca_reg_maxmr(shca, e_maxmr,
+- (void *)ehca_map_vaddr((void *)KERNELBASE),
++ ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+ mr_access_flags, e_pd,
+ &e_maxmr->ib.ib_mr.lkey,
+ &e_maxmr->ib.ib_mr.rkey);
+@@ -286,7 +251,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
+ e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
++ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto reg_phys_mr_exit1;
+@@ -405,7 +370,7 @@ reg_user_mr_fallback:
+
+ ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
+ e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
++ &e_mr->ib.ib_mr.rkey);
+ if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
+ ehca_warn(pd->device, "failed to register mr "
+ "with hwpage_size=%llx", hwpage_size);
+@@ -829,7 +794,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
+ ret = ehca_reg_mr(shca, e_fmr, NULL,
+ fmr_attr->max_pages * (1 << fmr_attr->page_shift),
+ mr_access_flags, e_pd, &pginfo,
+- &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
++ &tmp_lkey, &tmp_rkey);
+ if (ret) {
+ ib_fmr = ERR_PTR(ret);
+ goto alloc_fmr_exit1;
+@@ -1018,10 +983,6 @@ free_fmr_exit0:
+
+ /*----------------------------------------------------------------------*/
+
+-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+- struct ehca_mr *e_mr,
+- struct ehca_mr_pginfo *pginfo);
+-
+ int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+@@ -1030,8 +991,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey, /*OUT*/
+- u32 *rkey, /*OUT*/
+- enum ehca_reg_type reg_type)
++ u32 *rkey) /*OUT*/
+ {
+ int ret;
+ u64 h_ret;
+@@ -1055,13 +1015,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
+
+ e_mr->ipz_mr_handle = hipzout.handle;
+
+- if (reg_type == EHCA_REG_BUSMAP_MR)
+- ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+- else if (reg_type == EHCA_REG_MR)
+- ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+- else
+- ret = -EINVAL;
+-
++ ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ if (ret)
+ goto ehca_reg_mr_exit1;
+
+@@ -1362,7 +1316,7 @@ int ehca_rereg_mr(struct ehca_shca *shca
+ e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
+- e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
++ e_pd, pginfo, lkey, rkey);
+ if (ret) {
+ u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
+ memcpy(&e_mr->flags, &(save_mr.flags),
+@@ -1455,7 +1409,7 @@ int ehca_unmap_one_fmr(struct ehca_shca
+ ret = ehca_reg_mr(shca, e_fmr, NULL,
+ (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
+ e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
+- &tmp_rkey, EHCA_REG_MR);
++ &tmp_rkey);
+ if (ret) {
+ u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
+ memcpy(&e_fmr->flags, &(save_mr.flags),
+@@ -1524,90 +1478,6 @@ ehca_reg_smr_exit0:
+ } /* end ehca_reg_smr() */
+
+ /*----------------------------------------------------------------------*/
+-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+-{
+- unsigned long ret = idx;
+- ret |= dir << EHCA_DIR_INDEX_SHIFT;
+- ret |= top << EHCA_TOP_INDEX_SHIFT;
+- return abs_to_virt(ret << SECTION_SIZE_BITS);
+-}
+-
+-#define ehca_bmap_valid(entry) \
+- ((u64)entry != (u64)EHCA_INVAL_ADDR)
+-
+-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+- struct ehca_shca *shca, struct ehca_mr *mr,
+- struct ehca_mr_pginfo *pginfo)
+-{
+- u64 h_ret = 0;
+- unsigned long page = 0;
+- u64 rpage = virt_to_abs(kpage);
+- int page_count;
+-
+- void *sectbase = ehca_calc_sectbase(top, dir, idx);
+- if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+- ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
+- "hwpage_size does not fit to "
+- "section start address");
+- }
+- page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+-
+- while (page < page_count) {
+- u64 rnum;
+- for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+- rnum++) {
+- void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+- kpage[rnum] = virt_to_abs(pg);
+- }
+-
+- h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+- ehca_encode_hwpage_size(pginfo->hwpage_size),
+- 0, rpage, rnum);
+-
+- if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+- ehca_err(&shca->ib_device, "register_rpage_mr failed");
+- return h_ret;
+- }
+- }
+- return h_ret;
+-}
+-
+-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+- struct ehca_shca *shca, struct ehca_mr *mr,
+- struct ehca_mr_pginfo *pginfo)
+-{
+- u64 hret = H_SUCCESS;
+- int idx;
+-
+- for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+- continue;
+-
+- hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+- pginfo);
+- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+- return hret;
+- }
+- return hret;
+-}
+-
+-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+- struct ehca_mr *mr,
+- struct ehca_mr_pginfo *pginfo)
+-{
+- u64 hret = H_SUCCESS;
+- int dir;
+-
+- for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+- continue;
+-
+- hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+- return hret;
+- }
+- return hret;
+-}
+
+ /* register internal max-MR to internal SHCA */
+ int ehca_reg_internal_maxmr(
+@@ -1625,11 +1495,6 @@ int ehca_reg_internal_maxmr(
+ u32 num_hwpages;
+ u64 hw_pgsize;
+
+- if (!ehca_bmap) {
+- ret = -EFAULT;
+- goto ehca_reg_internal_maxmr_exit0;
+- }
+-
+ e_mr = ehca_mr_new();
+ if (!e_mr) {
+ ehca_err(&shca->ib_device, "out of memory");
+@@ -1639,8 +1504,8 @@ int ehca_reg_internal_maxmr(
+ e_mr->flags |= EHCA_MR_FLAG_MAXMR;
+
+ /* register internal max-MR on HCA */
+- size_maxmr = ehca_mr_len;
+- iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
++ size_maxmr = (u64)high_memory - PAGE_OFFSET;
++ iova_start = (u64 *)KERNELBASE;
+ ib_pbuf.addr = 0;
+ ib_pbuf.size = size_maxmr;
+ num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
+@@ -1659,7 +1524,7 @@ int ehca_reg_internal_maxmr(
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
+ &pginfo, &e_mr->ib.ib_mr.lkey,
+- &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
++ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ehca_err(&shca->ib_device, "reg of internal max MR failed, "
+ "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
+@@ -2212,8 +2077,8 @@ int ehca_mr_is_maxmr(u64 size,
+ u64 *iova_start)
+ {
+ /* a MR is treated as max-MR only if it fits following: */
+- if ((size == ehca_mr_len) &&
+- (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
++ if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
++ (iova_start == (void *)KERNELBASE)) {
+ ehca_gen_dbg("this is a max-MR");
+ return 1;
+ } else
+@@ -2319,350 +2184,3 @@ void ehca_cleanup_mrmw_cache(void)
+ if (mw_cache)
+ kmem_cache_destroy(mw_cache);
+ }
+-
+-static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+- int dir)
+-{
+- if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+- ehca_top_bmap->dir[dir] =
+- kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+- if (!ehca_top_bmap->dir[dir])
+- return -ENOMEM;
+- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+- memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+- }
+- return 0;
+-}
+-
+-static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+-{
+- if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+- ehca_bmap->top[top] =
+- kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+- if (!ehca_bmap->top[top])
+- return -ENOMEM;
+- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+- memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+- }
+- return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+-}
+-
+-static inline int ehca_calc_index(unsigned long i, unsigned long s)
+-{
+- return (i >> s) & EHCA_INDEX_MASK;
+-}
+-
+-void ehca_destroy_busmap(void)
+-{
+- int top, dir;
+-
+- if (!ehca_bmap)
+- return;
+-
+- for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+- if (!ehca_bmap_valid(ehca_bmap->top[top]))
+- continue;
+- for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+- continue;
+-
+- kfree(ehca_bmap->top[top]->dir[dir]);
+- }
+-
+- kfree(ehca_bmap->top[top]);
+- }
+-
+- kfree(ehca_bmap);
+- ehca_bmap = NULL;
+-}
+-
+-static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+-{
+- unsigned long i, start_section, end_section;
+- int top, dir, idx;
+-
+- if (!nr_pages)
+- return 0;
+-
+- if (!ehca_bmap) {
+- ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+- if (!ehca_bmap)
+- return -ENOMEM;
+- /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+- memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+- }
+-
+- start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+- end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+- for (i = start_section; i < end_section; i++) {
+- int ret;
+- top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+- dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+- idx = i & EHCA_INDEX_MASK;
+-
+- ret = ehca_init_bmap(ehca_bmap, top, dir);
+- if (ret) {
+- ehca_destroy_busmap();
+- return ret;
+- }
+- ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+- ehca_mr_len += EHCA_SECTSIZE;
+- }
+- return 0;
+-}
+-
+-static int ehca_is_hugepage(unsigned long pfn)
+-{
+- int page_order;
+-
+- if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+- return 0;
+-
+- page_order = compound_order(pfn_to_page(pfn));
+- if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+- return 0;
+-
+- return 1;
+-}
+-
+-static int ehca_create_busmap_callback(unsigned long initial_pfn,
+- unsigned long total_nr_pages, void *arg)
+-{
+- int ret;
+- unsigned long pfn, start_pfn, end_pfn, nr_pages;
+-
+- if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+- return ehca_update_busmap(initial_pfn, total_nr_pages);
+-
+- /* Given chunk is >= 16GB -> check for hugepages */
+- start_pfn = initial_pfn;
+- end_pfn = initial_pfn + total_nr_pages;
+- pfn = start_pfn;
+-
+- while (pfn < end_pfn) {
+- if (ehca_is_hugepage(pfn)) {
+- /* Add mem found in front of the hugepage */
+- nr_pages = pfn - start_pfn;
+- ret = ehca_update_busmap(start_pfn, nr_pages);
+- if (ret)
+- return ret;
+- /* Skip the hugepage */
+- pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+- start_pfn = pfn;
+- } else
+- pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+- }
+-
+- /* Add mem found behind the hugepage(s) */
+- nr_pages = pfn - start_pfn;
+- return ehca_update_busmap(start_pfn, nr_pages);
+-}
+-
+-int ehca_create_busmap(void)
+-{
+- int ret;
+-
+- ehca_mr_len = 0;
+- ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+- ehca_create_busmap_callback);
+- return ret;
+-}
+-
+-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+- struct ehca_mr *e_mr,
+- struct ehca_mr_pginfo *pginfo)
+-{
+- int top;
+- u64 hret, *kpage;
+-
+- kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+- if (!kpage) {
+- ehca_err(&shca->ib_device, "kpage alloc failed");
+- return -ENOMEM;
+- }
+- for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+- if (!ehca_bmap_valid(ehca_bmap->top[top]))
+- continue;
+- hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+- if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+- break;
+- }
+-
+- ehca_free_fw_ctrlblock(kpage);
+-
+- if (hret == H_SUCCESS)
+- return 0; /* Everything is fine */
+- else {
+- ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+- "h_ret=%lli e_mr=%p top=%x lkey=%x "
+- "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+- e_mr->ib.ib_mr.lkey,
+- shca->ipz_hca_handle.handle,
+- e_mr->ipz_mr_handle.handle);
+- return ehca2ib_return_code(hret);
+- }
+-}
+-
+-static u64 ehca_map_vaddr(void *caddr)
+-{
+- int top, dir, idx;
+- unsigned long abs_addr, offset;
+- u64 entry;
+-
+- if (!ehca_bmap)
+- return EHCA_INVAL_ADDR;
+-
+- abs_addr = virt_to_abs(caddr);
+- top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+- if (!ehca_bmap_valid(ehca_bmap->top[top]))
+- return EHCA_INVAL_ADDR;
+-
+- dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+- if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+- return EHCA_INVAL_ADDR;
+-
+- idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+-
+- entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+- if (ehca_bmap_valid(entry)) {
+- offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+- return entry | offset;
+- } else
+- return EHCA_INVAL_ADDR;
+-}
+-
+-static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+-{
+- return dma_addr == EHCA_INVAL_ADDR;
+-}
+-
+-static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+- size_t size, enum dma_data_direction direction)
+-{
+- if (cpu_addr)
+- return ehca_map_vaddr(cpu_addr);
+- else
+- return EHCA_INVAL_ADDR;
+-}
+-
+-static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* This is only a stub; nothing to be done here */
+-}
+-
+-static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- u64 addr;
+-
+- if (offset + size > PAGE_SIZE)
+- return EHCA_INVAL_ADDR;
+-
+- addr = ehca_map_vaddr(page_address(page));
+- if (!ehca_dma_mapping_error(dev, addr))
+- addr += offset;
+-
+- return addr;
+-}
+-
+-static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* This is only a stub; nothing to be done here */
+-}
+-
+-static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+- int nents, enum dma_data_direction direction)
+-{
+- struct scatterlist *sg;
+- int i;
+-
+- for_each_sg(sgl, sg, nents, i) {
+- u64 addr;
+- addr = ehca_map_vaddr(sg_virt(sg));
+- if (ehca_dma_mapping_error(dev, addr))
+- return 0;
+-
+- sg->dma_address = addr;
+- sg->dma_length = sg->length;
+- }
+- return nents;
+-}
+-
+-static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction direction)
+-{
+- /* This is only a stub; nothing to be done here */
+-}
+-
+-static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+-{
+- return sg->dma_address;
+-}
+-
+-static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+-{
+- return sg->length;
+-}
+-
+-static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+-}
+-
+-static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+-}
+-
+-static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+- u64 *dma_handle, gfp_t flag)
+-{
+- struct page *p;
+- void *addr = NULL;
+- u64 dma_addr;
+-
+- p = alloc_pages(flag, get_order(size));
+- if (p) {
+- addr = page_address(p);
+- dma_addr = ehca_map_vaddr(addr);
+- if (ehca_dma_mapping_error(dev, dma_addr)) {
+- free_pages((unsigned long)addr, get_order(size));
+- return NULL;
+- }
+- if (dma_handle)
+- *dma_handle = dma_addr;
+- return addr;
+- }
+- return NULL;
+-}
+-
+-static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+- void *cpu_addr, u64 dma_handle)
+-{
+- if (cpu_addr && size)
+- free_pages((unsigned long)cpu_addr, get_order(size));
+-}
+-
+-
+-struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+- .mapping_error = ehca_dma_mapping_error,
+- .map_single = ehca_dma_map_single,
+- .unmap_single = ehca_dma_unmap_single,
+- .map_page = ehca_dma_map_page,
+- .unmap_page = ehca_dma_unmap_page,
+- .map_sg = ehca_dma_map_sg,
+- .unmap_sg = ehca_dma_unmap_sg,
+- .dma_address = ehca_dma_address,
+- .dma_len = ehca_dma_len,
+- .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
+- .sync_single_for_device = ehca_dma_sync_single_for_device,
+- .alloc_coherent = ehca_dma_alloc_coherent,
+- .free_coherent = ehca_dma_free_coherent,
+-};
+diff -Nurp ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_mrmw.h ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.h
+--- ofa_kernel-1.5.patched/drivers/infiniband/hw/ehca/ehca_mrmw.h 2009-09-25 12:18:09.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.h 2009-09-23 15:08:25.000000000 +0200
+@@ -42,11 +42,6 @@
+ #ifndef _EHCA_MRMW_H_
+ #define _EHCA_MRMW_H_
+
+-enum ehca_reg_type {
+- EHCA_REG_MR,
+- EHCA_REG_BUSMAP_MR
+-};
+-
+ int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+@@ -55,8 +50,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey,
+- u32 *rkey,
+- enum ehca_reg_type reg_type);
++ u32 *rkey);
+
+ int ehca_reg_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+@@ -124,9 +118,4 @@ void ehca_mrmw_reverse_map_acl(const u32
+
+ void ehca_mr_deletenew(struct ehca_mr *mr);
+
+-int ehca_create_busmap(void);
+-
+-void ehca_destroy_busmap(void);
+-
+-extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
+ #endif /*_EHCA_MRMW_H_*/
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-020-revert_inhibit_dmem.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-020-revert_inhibit_dmem.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-020-revert_inhibit_dmem.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-020-revert_inhibit_dmem.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,110 @@
+From 8e110a2fff1f110a9211f240acc12b8310e5cbd3 Mon Sep 17 00:00:00 2001
+From: Stefan Roscher <ossrosch at linux.vnet.ibm.com>
+Date: Wed, 22 Oct 2008 15:54:38 -0700
+Subject: [PATCH] IB/ehca: Reject dynamic memory add/remove when ehca adapter is present
+
+Since the ehca device driver does not support dynamic memory add and
+remove operations, the driver must explicitly reject such requests in
+order to prevent unpredictable behaviors related to existing memory
+regions that cover all of memory being used by InfiniBand protocols in
+the kernel.
+
+The solution (for now at least) is to add a memory notifier to the
+ehca device driver and if a request for dynamic memory add or remove
+comes in, ehca will always reject it. The user can add or remove
+memory by hot-removing the ehca adapter, performing the memory
+operation, and then hot-adding the ehca adapter back.
+
+Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
+Signed-off-by: Roland Dreier <rolandd at cisco.com>
+---
+ drivers/infiniband/hw/ehca/ehca_main.c | 47 --------------------------------
+ 1 files changed, 0 insertions(+), 47 deletions(-)
+
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:41:13.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:42:48.000000000 +0200
+@@ -44,8 +44,6 @@
+ #include <linux/slab.h>
+ #endif
+
+-#include <linux/notifier.h>
+-#include <linux/memory.h>
+ #include "ehca_classes.h"
+ #include "ehca_iverbs.h"
+ #include "ehca_mrmw.h"
+@@ -973,41 +971,6 @@
+ spin_unlock(&shca_list_lock);
+ }
+
+-static int ehca_mem_notifier(struct notifier_block *nb,
+- unsigned long action, void *data)
+-{
+- static unsigned long ehca_dmem_warn_time;
+- unsigned long flags;
+-
+- switch (action) {
+- case MEM_CANCEL_OFFLINE:
+- case MEM_CANCEL_ONLINE:
+- case MEM_ONLINE:
+- case MEM_OFFLINE:
+- return NOTIFY_OK;
+- case MEM_GOING_ONLINE:
+- case MEM_GOING_OFFLINE:
+- /* only ok if no hca is attached to the lpar */
+- spin_lock_irqsave(&shca_list_lock, flags);
+- if (list_empty(&shca_list)) {
+- spin_unlock_irqrestore(&shca_list_lock, flags);
+- return NOTIFY_OK;
+- } else {
+- spin_unlock_irqrestore(&shca_list_lock, flags);
+- if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+- 30 * 1000))
+- ehca_gen_err("DMEM operations are not allowed"
+- "in conjunction with eHCA");
+- return NOTIFY_BAD;
+- }
+- }
+- return NOTIFY_OK;
+-}
+-
+-static struct notifier_block ehca_mem_nb = {
+- .notifier_call = ehca_mem_notifier,
+-};
+-
+ static int __init ehca_module_init(void)
+ {
+ int ret;
+@@ -1035,12 +998,6 @@
+ goto module_init2;
+ }
+
+- ret = register_memory_notifier(&ehca_mem_nb);
+- if (ret) {
+- ehca_gen_err("Failed registering memory add/remove notifier");
+- goto module_init3;
+- }
+-
+ if (ehca_poll_all_eqs != 1) {
+ ehca_gen_err("WARNING!!!");
+ ehca_gen_err("It is possible to lose interrupts.");
+@@ -1053,9 +1010,6 @@
+
+ return 0;
+
+-module_init3:
+- ibmebus_unregister_driver(&ehca_driver);
+-
+ module_init2:
+ ehca_destroy_slab_caches();
+
+@@ -1071,8 +1025,6 @@
+
+ ibmebus_unregister_driver(&ehca_driver);
+
+- unregister_memory_notifier(&ehca_mem_nb);
+-
+ ehca_destroy_slab_caches();
+
+ ehca_destroy_comp_pool();
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-030-ibmebus_loc_code.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-030-ibmebus_loc_code.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-030-ibmebus_loc_code.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-030-ibmebus_loc_code.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,192 @@
+---
+ drivers/infiniband/hw/ehca/ehca_classes.h | 2 -
+ drivers/infiniband/hw/ehca/ehca_eq.c | 6 +--
+ drivers/infiniband/hw/ehca/ehca_main.c | 49 ++++++++++++++----------------
+ 3 files changed, 27 insertions(+), 30 deletions(-)
+
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_classes.h
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_classes.h 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_classes.h 2009-09-25 12:43:39.000000000 +0200
+@@ -112,7 +112,7 @@
+
+ struct ehca_shca {
+ struct ib_device ib_device;
+- struct of_device *ofdev;
++ struct ibmebus_dev *ibmebus_dev;
+ u8 num_ports;
+ int hw_level;
+ struct list_head shca_list;
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_eq.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_eq.c 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_eq.c 2009-09-25 12:43:39.000000000 +0200
+@@ -122,7 +122,7 @@
+
+ /* register interrupt handlers and initialize work queues */
+ if (type == EHCA_EQ) {
+- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
++ ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
+ IRQF_DISABLED, "ehca_eq",
+ (void *)shca);
+ if (ret < 0)
+@@ -130,7 +130,7 @@
+
+ tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+ } else if (type == EHCA_NEQ) {
+- ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
++ ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
+ IRQF_DISABLED, "ehca_neq",
+ (void *)shca);
+ if (ret < 0)
+@@ -170,7 +170,7 @@
+ u64 h_ret;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+- ibmebus_free_irq(eq->ist, (void *)shca);
++ ibmebus_free_irq(NULL, eq->ist, (void *)shca);
+
+ h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:42:48.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:45:40.000000000 +0200
+@@ -289,8 +289,8 @@
+ };
+
+ ehca_gen_dbg("Probing adapter %s...",
+- shca->ofdev->node->full_name);
+- loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
++ shca->ibmebus_dev->ofdev.node->full_name);
++ loc_code = of_get_property(shca->ibmebus_dev->ofdev.node, "ibm,loc-code", NULL);
+ if (loc_code)
+ ehca_gen_dbg(" ... location lode=%s", loc_code);
+
+@@ -459,7 +459,7 @@
+ shca->ib_device.node_type = RDMA_NODE_IB_CA;
+ shca->ib_device.phys_port_cnt = shca->num_ports;
+ shca->ib_device.num_comp_vectors = 1;
+- shca->ib_device.dma_device = &shca->ofdev->dev;
++ shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
+ shca->ib_device.query_device = ehca_query_device;
+ shca->ib_device.query_port = ehca_query_port;
+ shca->ib_device.query_gid = ehca_query_gid;
+@@ -620,11 +620,6 @@
+ .attrs = ehca_drv_attrs
+ };
+
+-static struct attribute_group *ehca_drv_attr_groups[] = {
+- &ehca_drv_attr_grp,
+- NULL,
+-};
+-
+ #define EHCA_RESOURCE_ATTR(name) \
+ static ssize_t ehca_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+@@ -708,7 +703,7 @@
+ .attrs = ehca_dev_attrs
+ };
+
+-static int __devinit ehca_probe(struct of_device *dev,
++static int __devinit ehca_probe(struct ibmebus_dev *dev,
+ const struct of_device_id *id)
+ {
+ struct ehca_shca *shca;
+@@ -717,16 +712,16 @@
+ int ret, i, eq_size;
+ unsigned long flags;
+
+- handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
++ handle = of_get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
+ if (!handle) {
+ ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+- dev->node->full_name);
++ dev->ofdev.node->full_name);
+ return -ENODEV;
+ }
+
+ if (!(*handle)) {
+ ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+- dev->node->full_name);
++ dev->ofdev.node->full_name);
+ return -ENODEV;
+ }
+
+@@ -745,9 +740,9 @@
+ for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
+ spin_lock_init(&shca->sport[i].mod_sqp_lock);
+
+- shca->ofdev = dev;
++ shca->ibmebus_dev = dev;
+ shca->ipz_hca_handle.handle = *handle;
+- dev->dev.driver_data = shca;
++ dev->ofdev.dev.driver_data = shca;
+
+ ret = ehca_sense_attributes(shca);
+ if (ret < 0) {
+@@ -824,7 +819,7 @@
+ }
+ }
+
+- ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
++ ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+ if (ret) /* only complain; we can live without attributes */
+ ehca_err(&shca->ib_device,
+ "Cannot create device attributes ret=%d", ret);
+@@ -874,13 +869,13 @@
+ return -EINVAL;
+ }
+
+-static int __devexit ehca_remove(struct of_device *dev)
++static int __devexit ehca_remove(struct ibmebus_dev *dev)
+ {
+- struct ehca_shca *shca = dev->dev.driver_data;
++ struct ehca_shca *shca = dev->ofdev.dev.driver_data;
+ unsigned long flags;
+ int ret;
+
+- sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
++ sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+
+ if (ehca_open_aqp1 == 1) {
+ int i;
+@@ -932,14 +927,11 @@
+ };
+ MODULE_DEVICE_TABLE(of, ehca_device_table);
+
+-static struct of_platform_driver ehca_driver = {
+- .name = "ehca",
+- .match_table = ehca_device_table,
+- .probe = ehca_probe,
+- .remove = ehca_remove,
+- .driver = {
+- .groups = ehca_drv_attr_groups,
+- },
++static struct ibmebus_driver ehca_driver = {
++ .name = "ehca",
++ .id_table = ehca_device_table,
++ .probe = ehca_probe,
++ .remove = ehca_remove,
+ };
+
+ void ehca_poll_eqs(unsigned long data)
+@@ -998,6 +990,10 @@
+ goto module_init2;
+ }
+
++ ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
++ if (ret) /* only complain; we can live without attributes */
++ ehca_gen_err("Cannot create driver attributes ret=%d", ret);
++
+ if (ehca_poll_all_eqs != 1) {
+ ehca_gen_err("WARNING!!!");
+ ehca_gen_err("It is possible to lose interrupts.");
+@@ -1023,6 +1019,7 @@
+ if (ehca_poll_all_eqs == 1)
+ del_timer_sync(&poll_eqs_timer);
+
++ sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+ ibmebus_unregister_driver(&ehca_driver);
+
+ ehca_destroy_slab_caches();
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-040-undo_cpumask.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-040-undo_cpumask.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-040-undo_cpumask.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-040-undo_cpumask.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,42 @@
+---
+ drivers/infiniband/hw/ehca/ehca_irq.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_irq.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_irq.c 2009-07-27 08:20:08.000000000 -0400
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_irq.c 2009-07-27 08:26:31.000000000 -0400
+@@ -659,12 +659,12 @@
+
+ WARN_ON_ONCE(!in_interrupt());
+ if (ehca_debug_level >= 3)
+- ehca_dmp(cpu_online_mask, cpumask_size(), "");
++ ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+
+ spin_lock_irqsave(&pool->last_cpu_lock, flags);
+- cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
++ cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+ if (cpu >= nr_cpu_ids)
+- cpu = cpumask_first(cpu_online_mask);
++ cpu = first_cpu(cpu_online_map);
+ pool->last_cpu = cpu;
+ spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
+
+@@ -855,7 +855,7 @@
+ case CPU_UP_CANCELED_FROZEN:
+ ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+- kthread_bind(cct->task, cpumask_any(cpu_online_mask));
++ kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+ destroy_comp_task(pool, cpu);
+ break;
+ case CPU_ONLINE:
+@@ -902,7 +902,7 @@
+ return -ENOMEM;
+
+ spin_lock_init(&pool->last_cpu_lock);
+- pool->last_cpu = cpumask_any(cpu_online_mask);
++ pool->last_cpu = any_online_cpu(cpu_online_map);
+
+ pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
+ if (pool->cpu_comp_tasks == NULL) {
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-050-undo_unsigned_long.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-050-undo_unsigned_long.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-050-undo_unsigned_long.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-050-undo_unsigned_long.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,994 @@
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_cq.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_cq.c 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_cq.c 2009-09-25 13:31:12.000000000 +0200
+@@ -196,7 +196,7 @@
+
+ if (h_ret != H_SUCCESS) {
+ ehca_err(device, "hipz_h_alloc_resource_cq() failed "
+- "h_ret=%lli device=%p", h_ret, device);
++ "h_ret=%li device=%p", h_ret, device);
+ cq = ERR_PTR(ehca2ib_return_code(h_ret));
+ goto create_cq_exit2;
+ }
+@@ -232,7 +232,7 @@
+
+ if (h_ret < H_SUCCESS) {
+ ehca_err(device, "hipz_h_register_rpage_cq() failed "
+- "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
++ "ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
+ "act_pages=%i", my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+ cq = ERR_PTR(-EINVAL);
+@@ -244,7 +244,7 @@
+ if ((h_ret != H_SUCCESS) || vpage) {
+ ehca_err(device, "Registration of pages not "
+ "complete ehca_cq=%p cq_num=%x "
+- "h_ret=%lli", my_cq, my_cq->cq_number,
++ "h_ret=%li", my_cq, my_cq->cq_number,
+ h_ret);
+ cq = ERR_PTR(-EAGAIN);
+ goto create_cq_exit4;
+@@ -252,7 +252,7 @@
+ } else {
+ if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(device, "Registration of page failed "
+- "ehca_cq=%p cq_num=%x h_ret=%lli "
++ "ehca_cq=%p cq_num=%x h_ret=%li "
+ "counter=%i act_pages=%i",
+ my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+@@ -266,7 +266,7 @@
+
+ gal = my_cq->galpas.kernel;
+ cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
+- ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
++ ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+ my_cq, my_cq->cq_number, cqx_fec);
+
+ my_cq->ib_cq.cqe = my_cq->nr_of_entries =
+@@ -307,7 +307,7 @@
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+ if (h_ret != H_SUCCESS)
+ ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+- "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
++ "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);
+
+ create_cq_exit2:
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
+@@ -355,7 +355,7 @@
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
+ if (h_ret == H_R_STATE) {
+ /* cq in err: read err data and destroy it forcibly */
+- ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
++ ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+ "state. Try to delete it forcibly.",
+ my_cq, cq_num, my_cq->ipz_cq_handle.handle);
+ ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
+@@ -365,7 +365,7 @@
+ cq_num);
+ }
+ if (h_ret != H_SUCCESS) {
+- ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
++ ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
+ "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+ return ehca2ib_return_code(h_ret);
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_hca.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_hca.c 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_hca.c 2009-09-25 13:31:12.000000000 +0200
+@@ -393,7 +393,7 @@
+ hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
+ cap, props->init_type, port_modify_mask);
+ if (hret != H_SUCCESS) {
+- ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
++ ehca_err(&shca->ib_device, "Modify port failed h_ret=%li",
+ hret);
+ ret = -EINVAL;
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_irq.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_irq.c 2009-09-25 13:28:31.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_irq.c 2009-09-25 13:31:12.000000000 +0200
+@@ -99,7 +99,7 @@
+ return;
+
+ ehca_err(&shca->ib_device,
+- "QP 0x%x (resource=%llx) has errors.",
++ "QP 0x%x (resource=%lx) has errors.",
+ qp->ib_qp.qp_num, resource);
+ break;
+ }
+@@ -108,21 +108,21 @@
+ struct ehca_cq *cq = (struct ehca_cq *)data;
+
+ ehca_err(&shca->ib_device,
+- "CQ 0x%x (resource=%llx) has errors.",
++ "CQ 0x%x (resource=%lx) has errors.",
+ cq->cq_number, resource);
+ break;
+ }
+ default:
+ ehca_err(&shca->ib_device,
+- "Unknown error type: %llx on %s.",
++ "Unknown error type: %lx on %s.",
+ type, shca->ib_device.name);
+ break;
+ }
+
+- ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
++ ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data begin "
+ "---------------------------------------------------");
+- ehca_dmp(rblock, length, "resource=%llx", resource);
++ ehca_dmp(rblock, length, "resource=%lx", resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data end "
+ "----------------------------------------------------");
+
+@@ -152,7 +152,7 @@
+
+ if (ret == H_R_STATE)
+ ehca_err(&shca->ib_device,
+- "No error data is available: %llx.", resource);
++ "No error data is available: %lx.", resource);
+ else if (ret == H_SUCCESS) {
+ int length;
+
+@@ -164,7 +164,7 @@
+ print_error_data(shca, data, rblock, length);
+ } else
+ ehca_err(&shca->ib_device,
+- "Error data could not be fetched: %llx", resource);
++ "Error data could not be fetched: %lx", resource);
+
+ ehca_free_fw_ctrlblock(rblock);
+
+@@ -514,7 +514,7 @@
+ struct ehca_cq *cq;
+
+ eqe_value = eqe->entry;
+- ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
++ ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+ if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
+ ehca_dbg(&shca->ib_device, "Got completion event");
+ token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
+@@ -603,7 +603,7 @@
+ ret = hipz_h_eoi(eq->ist);
+ if (ret != H_SUCCESS)
+ ehca_err(&shca->ib_device,
+- "bad return code EOI -rc = %lld\n", ret);
++ "bad return code EOI -rc = %ld\n", ret);
+ ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+ }
+ if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 12:46:08.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_main.c 2009-09-25 13:31:12.000000000 +0200
+@@ -302,7 +302,7 @@
+
+ h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
+ if (h_ret != H_SUCCESS) {
+- ehca_gen_err("Cannot query device properties. h_ret=%lli",
++ ehca_gen_err("Cannot query device properties. h_ret=%li",
+ h_ret);
+ ret = -EPERM;
+ goto sense_attributes1;
+@@ -389,7 +389,7 @@
+ port = (struct hipz_query_port *)rblock;
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
+ if (h_ret != H_SUCCESS) {
+- ehca_gen_err("Cannot query port properties. h_ret=%lli",
++ ehca_gen_err("Cannot query port properties. h_ret=%li",
+ h_ret);
+ ret = -EPERM;
+ goto sense_attributes1;
+@@ -675,7 +675,7 @@
+ {
+ struct ehca_shca *shca = dev->driver_data;
+
+- return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
++ return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+
+ }
+ static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mcast.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_mcast.c 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mcast.c 2009-09-25 13:31:12.000000000 +0200
+@@ -88,7 +88,7 @@
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
+ "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
+- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
++ "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+
+ return ehca2ib_return_code(h_ret);
+ }
+@@ -125,7 +125,7 @@
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
+ "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
+- "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
++ "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+
+ return ehca2ib_return_code(h_ret);
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_mrmw.c 2009-09-25 12:41:13.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_mrmw.c 2009-09-25 13:31:12.000000000 +0200
+@@ -204,7 +204,7 @@
+ }
+ if ((size == 0) ||
+ (((u64)iova_start + size) < (u64)iova_start)) {
+- ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
++ ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+ size, iova_start);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_phys_mr_exit0;
+@@ -309,8 +309,8 @@
+ }
+
+ if (length == 0 || virt + length < virt) {
+- ehca_err(pd->device, "bad input values: length=%llx "
+- "virt_base=%llx", length, virt);
++ ehca_err(pd->device, "bad input values: length=%lx "
++ "virt_base=%lx", length, virt);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit0;
+ }
+@@ -373,7 +373,7 @@
+ &e_mr->ib.ib_mr.rkey);
+ if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
+ ehca_warn(pd->device, "failed to register mr "
+- "with hwpage_size=%llx", hwpage_size);
++ "with hwpage_size=%lx", hwpage_size);
+ ehca_info(pd->device, "try to register mr with "
+ "kpage_size=%lx", PAGE_SIZE);
+ /*
+@@ -509,7 +509,7 @@
+ goto rereg_phys_mr_exit1;
+ if ((new_size == 0) ||
+ (((u64)iova_start + new_size) < (u64)iova_start)) {
+- ehca_err(mr->device, "bad input values: new_size=%llx "
++ ehca_err(mr->device, "bad input values: new_size=%lx "
+ "iova_start=%p", new_size, iova_start);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit1;
+@@ -580,8 +580,8 @@
+
+ h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
+- "hca_hndl=%llx mr_hndl=%llx lkey=%x",
++ ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p "
++ "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle, mr->lkey);
+ ret = ehca2ib_return_code(h_ret);
+@@ -630,8 +630,8 @@
+ /* TODO: BUSY: MR still has bound window(s) */
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
+- "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
++ ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p "
++ "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+ h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle, mr->lkey);
+ ret = ehca2ib_return_code(h_ret);
+@@ -671,8 +671,8 @@
+ h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
+ e_pd->fw_pd, &hipzout);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
+- "shca=%p hca_hndl=%llx mw=%p",
++ ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li "
++ "shca=%p hca_hndl=%lx mw=%p",
+ h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
+ ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
+ goto alloc_mw_exit1;
+@@ -713,8 +713,8 @@
+
+ h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
+- "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
++ ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p "
++ "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+ h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
+ e_mw->ipz_mw_handle.handle);
+ return ehca2ib_return_code(h_ret);
+@@ -840,7 +840,7 @@
+ goto map_phys_fmr_exit0;
+ if (iova % e_fmr->fmr_page_size) {
+ /* only whole-numbered pages */
+- ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
++ ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+ iova, e_fmr->fmr_page_size);
+ ret = -EINVAL;
+ goto map_phys_fmr_exit0;
+@@ -878,7 +878,7 @@
+ map_phys_fmr_exit0:
+ if (ret)
+ ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
+- "iova=%llx", ret, fmr, page_list, list_len, iova);
++ "iova=%lx", ret, fmr, page_list, list_len, iova);
+ return ret;
+ } /* end ehca_map_phys_fmr() */
+
+@@ -964,8 +964,8 @@
+
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
+- "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
++ ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p "
++ "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle, fmr->lkey);
+ ret = ehca2ib_return_code(h_ret);
+@@ -1007,8 +1007,8 @@
+ (u64)iova_start, size, hipz_acl,
+ e_pd->fw_pd, &hipzout);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
+- "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
++ ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li "
++ "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
+ ret = ehca2ib_return_code(h_ret);
+ goto ehca_reg_mr_exit0;
+ }
+@@ -1033,9 +1033,9 @@
+ ehca_reg_mr_exit1:
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
+- "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
+- "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
++ ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p "
++ "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
++ "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i",
+ h_ret, shca, e_mr, iova_start, size, acl, e_pd,
+ hipzout.lkey, pginfo, pginfo->num_kpages,
+ pginfo->num_hwpages, ret);
+@@ -1045,8 +1045,8 @@
+ ehca_reg_mr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
+- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+- "num_kpages=%llx num_hwpages=%llx",
++ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
++ "num_kpages=%lx num_hwpages=%lx",
+ ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
+ pginfo->num_kpages, pginfo->num_hwpages);
+ return ret;
+@@ -1116,8 +1116,8 @@
+ */
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "last "
+- "hipz_reg_rpage_mr failed, h_ret=%lli "
+- "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
++ "hipz_reg_rpage_mr failed, h_ret=%li "
++ "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+ " lkey=%x", h_ret, e_mr, i,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle,
+@@ -1128,8 +1128,8 @@
+ ret = 0;
+ } else if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
+- "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
+- "mr_hndl=%llx", h_ret, e_mr, i,
++ "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx "
++ "mr_hndl=%lx", h_ret, e_mr, i,
+ e_mr->ib.ib_mr.lkey,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle);
+@@ -1145,7 +1145,7 @@
+ ehca_reg_mr_rpages_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
+- "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
++ "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
+ pginfo, pginfo->num_kpages, pginfo->num_hwpages);
+ return ret;
+ } /* end ehca_reg_mr_rpages() */
+@@ -1184,7 +1184,7 @@
+ ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
+ if (ret) {
+ ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
+- "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
++ "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
+ "kpage=%p", e_mr, pginfo, pginfo->type,
+ pginfo->num_kpages, pginfo->num_hwpages, kpage);
+ goto ehca_rereg_mr_rereg1_exit1;
+@@ -1205,13 +1205,13 @@
+ * (MW bound or MR is shared)
+ */
+ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
+- "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
++ "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr);
+ *pginfo = pginfo_save;
+ ret = -EAGAIN;
+ } else if ((u64 *)hipzout.vaddr != iova_start) {
+ ehca_err(&shca->ib_device, "PHYP changed iova_start in "
+- "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
+- "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
++ "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
++ "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+ hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
+ e_mr->ib.ib_mr.lkey, hipzout.lkey);
+ ret = -EFAULT;
+@@ -1235,7 +1235,7 @@
+ ehca_rereg_mr_rereg1_exit0:
+ if ( ret && (ret != -EAGAIN) )
+ ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
+- "pginfo=%p num_kpages=%llx num_hwpages=%llx",
++ "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+ ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
+ pginfo->num_hwpages);
+ return ret;
+@@ -1263,7 +1263,7 @@
+ (e_mr->num_hwpages > MAX_RPAGES) ||
+ (pginfo->num_hwpages > e_mr->num_hwpages)) {
+ ehca_dbg(&shca->ib_device, "Rereg3 case, "
+- "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
++ "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+ pginfo->num_hwpages, e_mr->num_hwpages);
+ rereg_1_hcall = 0;
+ rereg_3_hcall = 1;
+@@ -1295,7 +1295,7 @@
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+- "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
++ "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+ "mr->lkey=%x",
+ h_ret, e_mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle,
+@@ -1328,8 +1328,8 @@
+ ehca_rereg_mr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
+- "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+- "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
++ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
++ "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+ "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
+ acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
+ rereg_1_hcall, rereg_3_hcall);
+@@ -1371,8 +1371,8 @@
+ * FMRs are not shared and no MW bound to FMRs
+ */
+ ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+- "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
+- "mr_hndl=%llx lkey=%x lkey_out=%x",
++ "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx "
++ "mr_hndl=%lx lkey=%x lkey_out=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+ e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
+@@ -1383,7 +1383,7 @@
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+- "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
++ "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+ "lkey=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+@@ -1447,9 +1447,9 @@
+ (u64)iova_start, hipz_acl, e_pd->fw_pd,
+ &hipzout);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
++ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
+ "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
+- "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
++ "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
+ shca->ipz_hca_handle.handle,
+ e_origmr->ipz_mr_handle.handle,
+@@ -1527,7 +1527,7 @@
+ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ehca_err(&shca->ib_device, "reg of internal max MR failed, "
+- "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
++ "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+ "num_hwpages=%x", e_mr, iova_start, size_maxmr,
+ num_kpages, num_hwpages);
+ goto ehca_reg_internal_maxmr_exit1;
+@@ -1573,8 +1573,8 @@
+ (u64)iova_start, hipz_acl, e_pd->fw_pd,
+ &hipzout);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
+- "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
++ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
++ "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, e_origmr, shca->ipz_hca_handle.handle,
+ e_origmr->ipz_mr_handle.handle,
+ e_origmr->ib.ib_mr.lkey);
+@@ -1651,28 +1651,28 @@
+ /* check first buffer */
+ if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
+ ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
+- "pbuf->addr=%llx pbuf->size=%llx",
++ "pbuf->addr=%lx pbuf->size=%lx",
+ iova_start, pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+ if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
+ (num_phys_buf > 1)) {
+- ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
+- "pbuf->size=%llx", pbuf->addr, pbuf->size);
++ ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
++ "pbuf->size=%lx", pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_phys_buf; i++) {
+ if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
+- ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
+- "pbuf->size=%llx",
++ ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
++ "pbuf->size=%lx",
+ i, pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+ if (((i > 0) && /* not 1st */
+ (i < (num_phys_buf - 1)) && /* not last */
+ (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
+- ehca_gen_err("bad size, i=%x pbuf->size=%llx",
++ ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+ i, pbuf->size);
+ return -EINVAL;
+ }
+@@ -1705,7 +1705,7 @@
+ page = page_list;
+ for (i = 0; i < list_len; i++) {
+ if (*page % e_fmr->fmr_page_size) {
+- ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
++ ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+ "fmr_page_size=%x", i, *page, page, e_fmr,
+ e_fmr->fmr_page_size);
+ return -EINVAL;
+@@ -1743,9 +1743,9 @@
+ (pginfo->next_hwpage *
+ pginfo->hwpage_size));
+ if ( !(*kpage) ) {
+- ehca_gen_err("pgaddr=%llx "
+- "chunk->page_list[i]=%llx "
+- "i=%x next_hwpage=%llx",
++ ehca_gen_err("pgaddr=%lx "
++ "chunk->page_list[i]=%lx "
++ "i=%x next_hwpage=%lx",
+ pgaddr, (u64)sg_dma_address(
+ &chunk->page_list[i]),
+ i, pginfo->next_hwpage);
+@@ -1795,11 +1795,11 @@
+ for (t = start_idx; t <= end_idx; t++) {
+ u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
+ if (ehca_debug_level >= 3)
+- ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
++ ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+ *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+ if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
+- ehca_gen_err("uncontiguous page found pgaddr=%llx "
+- "prev_pgaddr=%llx page_list_i=%x",
++ ehca_gen_err("uncontiguous page found pgaddr=%lx "
++ "prev_pgaddr=%lx page_list_i=%x",
+ pgaddr, *prev_pgaddr, t);
+ return -EINVAL;
+ }
+@@ -1833,7 +1833,7 @@
+ << PAGE_SHIFT );
+ *kpage = phys_to_abs(pgaddr);
+ if ( !(*kpage) ) {
+- ehca_gen_err("pgaddr=%llx i=%x",
++ ehca_gen_err("pgaddr=%lx i=%x",
+ pgaddr, i);
+ ret = -EFAULT;
+ return ret;
+@@ -1846,8 +1846,8 @@
+ if (pginfo->hwpage_cnt) {
+ ehca_gen_err(
+ "invalid alignment "
+- "pgaddr=%llx i=%x "
+- "mr_pgsize=%llx",
++ "pgaddr=%lx i=%x "
++ "mr_pgsize=%lx",
+ pgaddr, i,
+ pginfo->hwpage_size);
+ ret = -EFAULT;
+@@ -1866,8 +1866,8 @@
+ if (ehca_debug_level >= 3) {
+ u64 val = *(u64 *)abs_to_virt(
+ phys_to_abs(pgaddr));
+- ehca_gen_dbg("kpage=%llx chunk_page=%llx "
+- "value=%016llx",
++ ehca_gen_dbg("kpage=%lx chunk_page=%lx "
++ "value=%016lx",
+ *kpage, pgaddr, val);
+ }
+ prev_pgaddr = pgaddr;
+@@ -1944,9 +1944,9 @@
+ if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
+ (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
+ ehca_gen_err("kpage_cnt >= num_kpages, "
+- "kpage_cnt=%llx num_kpages=%llx "
+- "hwpage_cnt=%llx "
+- "num_hwpages=%llx i=%x",
++ "kpage_cnt=%lx num_kpages=%lx "
++ "hwpage_cnt=%lx "
++ "num_hwpages=%lx i=%x",
+ pginfo->kpage_cnt,
+ pginfo->num_kpages,
+ pginfo->hwpage_cnt,
+@@ -1957,8 +1957,8 @@
+ (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
+ (pginfo->next_hwpage * pginfo->hwpage_size));
+ if ( !(*kpage) && pbuf->addr ) {
+- ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
+- "next_hwpage=%llx", pbuf->addr,
++ ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
++ "next_hwpage=%lx", pbuf->addr,
+ pbuf->size, pginfo->next_hwpage);
+ return -EFAULT;
+ }
+@@ -1996,8 +1996,8 @@
+ *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
+ pginfo->next_hwpage * pginfo->hwpage_size);
+ if ( !(*kpage) ) {
+- ehca_gen_err("*fmrlist=%llx fmrlist=%p "
+- "next_listelem=%llx next_hwpage=%llx",
++ ehca_gen_err("*fmrlist=%lx fmrlist=%p "
++ "next_listelem=%lx next_hwpage=%lx",
+ *fmrlist, fmrlist,
+ pginfo->u.fmr.next_listelem,
+ pginfo->next_hwpage);
+@@ -2025,7 +2025,7 @@
+ ~(pginfo->hwpage_size - 1));
+ if (prev + pginfo->u.fmr.fmr_pgsize != p) {
+ ehca_gen_err("uncontiguous fmr pages "
+- "found prev=%llx p=%llx "
++ "found prev=%lx p=%lx "
+ "idx=%x", prev, p, i + j);
+ return -EINVAL;
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_qp.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_qp.c 2009-09-25 12:40:28.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_qp.c 2009-09-25 13:31:12.000000000 +0200
+@@ -331,7 +331,7 @@
+ if (cnt == (nr_q_pages - 1)) { /* last page! */
+ if (h_ret != expected_hret) {
+ ehca_err(ib_dev, "hipz_qp_register_rpage() "
+- "h_ret=%lli", h_ret);
++ "h_ret=%li", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queue1;
+ }
+@@ -345,7 +345,7 @@
+ } else {
+ if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(ib_dev, "hipz_qp_register_rpage() "
+- "h_ret=%lli", h_ret);
++ "h_ret=%li", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queue1;
+ }
+@@ -711,7 +711,7 @@
+
+ h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
++ ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
+ h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto create_qp_exit1;
+@@ -1015,7 +1015,7 @@
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not modify SRQ to INIT "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+@@ -1029,7 +1029,7 @@
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not enable SRQ "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+@@ -1043,7 +1043,7 @@
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not modify SRQ to RTR "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+@@ -1083,7 +1083,7 @@
+ &bad_send_wqe_p, NULL, 2);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
+- " ehca_qp=%p qp_num=%x h_ret=%lli",
++ " ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, qp_num, h_ret);
+ return ehca2ib_return_code(h_ret);
+ }
+@@ -1139,7 +1139,7 @@
+
+ if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
+ ehca_gen_err("Invalid offset for calculating left cqes "
+- "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
++ "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+ return -EFAULT;
+ }
+
+@@ -1173,7 +1173,7 @@
+ &send_wqe_p, &recv_wqe_p, 4);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "disable_and_get_wqe() "
+- "failed ehca_qp=%p qp_num=%x h_ret=%lli",
++ "failed ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, qp_num, h_ret);
+ return ehca2ib_return_code(h_ret);
+ }
+@@ -1267,7 +1267,7 @@
+ mqpcb, my_qp->galpas.kernel);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(ibqp->device, "hipz_h_query_qp() failed "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, ibqp->qp_num, h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto modify_qp_exit1;
+@@ -1698,7 +1698,7 @@
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+- ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
++ ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
+ "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
+@@ -1731,7 +1731,7 @@
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(ibqp->device, "ENABLE in context of "
+ "RESET_2_INIT failed! Maybe you didn't get "
+- "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
++ "a LID h_ret=%li ehca_qp=%p qp_num=%x",
+ h_ret, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
+@@ -1919,7 +1919,7 @@
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(qp->device, "hipz_h_query_qp() failed "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, qp->qp_num, h_ret);
+ goto query_qp_exit1;
+ }
+@@ -2077,7 +2077,7 @@
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+- ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
++ ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
+ "ehca_qp=%p qp_num=%x",
+ h_ret, my_qp, my_qp->real_qp_num);
+ }
+@@ -2111,7 +2111,7 @@
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(srq->device, "hipz_h_query_qp() failed "
+- "ehca_qp=%p qp_num=%x h_ret=%lli",
++ "ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, my_qp->real_qp_num, h_ret);
+ goto query_srq_exit1;
+ }
+@@ -2183,7 +2183,7 @@
+
+ h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
+ if (h_ret != H_SUCCESS) {
+- ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
++ ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
+ "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
+ return ehca2ib_return_code(h_ret);
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_reqs.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_reqs.c 2009-09-25 12:40:29.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_reqs.c 2009-09-25 13:31:12.000000000 +0200
+@@ -826,7 +826,7 @@
+ offset = qmap->next_wqe_idx * ipz_queue->qe_size;
+ wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
+ if (!wqe) {
+- ehca_err(cq->device, "Invalid wqe offset=%#llx on "
++ ehca_err(cq->device, "Invalid wqe offset=%#lx on "
+ "qp_num=%#x", offset, my_qp->real_qp_num);
+ return nr;
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_sqp.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_sqp.c 2009-09-25 12:40:29.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_sqp.c 2009-09-25 13:31:12.000000000 +0200
+@@ -85,7 +85,7 @@
+
+ if (ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device,
+- "Can't define AQP1 for port %x. h_ret=%lli",
++ "Can't define AQP1 for port %x. h_ret=%li",
+ port, ret);
+ return ret;
+ }
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_tools.h
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_tools.h 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_tools.h 2009-09-25 13:31:12.000000000 +0200
+@@ -116,7 +116,7 @@
+ unsigned char *deb = (unsigned char *)(adr); \
+ for (x = 0; x < l; x += 16) { \
+ printk(KERN_INFO "EHCA_DMP:%s " format \
+- " adr=%p ofs=%04x %016llx %016llx\n", \
++ " adr=%p ofs=%04x %016lx %016lx\n", \
+ __func__, ##args, deb, x, \
+ *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
+ deb += 16; \
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_uverbs.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/ehca_uverbs.c 2009-09-23 15:08:25.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/ehca_uverbs.c 2009-09-25 13:31:12.000000000 +0200
+@@ -114,7 +114,7 @@
+
+ physical = galpas->user.fw_handle;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
++ ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+ /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+ ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
+ vma->vm_page_prot);
+Index: ofa_kernel-1.5/drivers/infiniband/hw/ehca/hcp_if.c
+===================================================================
+--- ofa_kernel-1.5.orig/drivers/infiniband/hw/ehca/hcp_if.c 2009-09-25 12:40:28.000000000 +0200
++++ ofa_kernel-1.5/drivers/infiniband/hw/ehca/hcp_if.c 2009-09-25 13:31:12.000000000 +0200
+@@ -249,7 +249,7 @@
+ *eq_ist = (u32)outs[5];
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+- ehca_gen_err("Not enough resource - ret=%lli ", ret);
++ ehca_gen_err("Not enough resource - ret=%li ", ret);
+
+ return ret;
+ }
+@@ -287,7 +287,7 @@
+ hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+- ehca_gen_err("Not enough resources. ret=%lli", ret);
++ ehca_gen_err("Not enough resources. ret=%li", ret);
+
+ return ret;
+ }
+@@ -362,7 +362,7 @@
+ hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+- ehca_gen_err("Not enough resources. ret=%lli", ret);
++ ehca_gen_err("Not enough resources. ret=%li", ret);
+
+ return ret;
+ }
+@@ -454,7 +454,7 @@
+ const u64 count)
+ {
+ if (count != 1) {
+- ehca_gen_err("Ppage counter=%llx", count);
++ ehca_gen_err("Ppage counter=%lx", count);
+ return H_PARAMETER;
+ }
+ return hipz_h_register_rpage(adapter_handle,
+@@ -489,7 +489,7 @@
+ const struct h_galpa gal)
+ {
+ if (count != 1) {
+- ehca_gen_err("Page counter=%llx", count);
++ ehca_gen_err("Page counter=%lx", count);
+ return H_PARAMETER;
+ }
+
+@@ -508,7 +508,7 @@
+ const struct h_galpa galpa)
+ {
+ if (count > 1) {
+- ehca_gen_err("Page counter=%llx", count);
++ ehca_gen_err("Page counter=%lx", count);
+ return H_PARAMETER;
+ }
+
+@@ -557,7 +557,7 @@
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+- ehca_gen_err("Insufficient resources ret=%lli", ret);
++ ehca_gen_err("Insufficient resources ret=%li", ret);
+
+ return ret;
+ }
+@@ -593,7 +593,7 @@
+ qp->ipz_qp_handle.handle, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+ if (ret == H_HARDWARE)
+- ehca_gen_err("HCA not operational. ret=%lli", ret);
++ ehca_gen_err("HCA not operational. ret=%li", ret);
+
+ ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+@@ -601,7 +601,7 @@
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+- ehca_gen_err("Resource still in use. ret=%lli", ret);
++ ehca_gen_err("Resource still in use. ret=%li", ret);
+
+ return ret;
+ }
+@@ -636,7 +636,7 @@
+ *bma_qp_nr = (u32)outs[1];
+
+ if (ret == H_ALIAS_EXIST)
+- ehca_gen_err("AQP1 already exists. ret=%lli", ret);
++ ehca_gen_err("AQP1 already exists. ret=%li", ret);
+
+ return ret;
+ }
+@@ -658,7 +658,7 @@
+ 0, 0);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+- ehca_gen_err("Not enough resources. ret=%lli", ret);
++ ehca_gen_err("Not enough resources. ret=%li", ret);
+
+ return ret;
+ }
+@@ -697,7 +697,7 @@
+ 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+- ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
++ ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret);
+
+ return ret;
+ }
+@@ -719,7 +719,7 @@
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+- ehca_gen_err("Resource in use. ret=%lli ", ret);
++ ehca_gen_err("Resource in use. ret=%li ", ret);
+
+ return ret;
+ }
+@@ -774,9 +774,9 @@
+
+ if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
+ ehca_gen_err("logical_address_of_page not on a 4k boundary "
+- "adapter_handle=%llx mr=%p mr_handle=%llx "
++ "adapter_handle=%lx mr=%p mr_handle=%lx "
+ "pagesize=%x queue_type=%x "
+- "logical_address_of_page=%llx count=%llx",
++ "logical_address_of_page=%lx count=%lx",
+ adapter_handle.handle, mr,
+ mr->ipz_mr_handle.handle, pagesize, queue_type,
+ logical_address_of_page, count);
diff -Nurp ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-060-revert_interface_change.patch ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-060-revert_interface_change.patch
--- ofa_kernel-1.5.old/kernel_patches/backport/2.6.16_sles10_sp2/ehca-060-revert_interface_change.patch 1970-01-01 01:00:00.000000000 +0100
+++ ofa_kernel-1.5.new/kernel_patches/backport/2.6.16_sles10_sp2/ehca-060-revert_interface_change.patch 2009-10-01 12:51:46.000000000 +0200
@@ -0,0 +1,25 @@
+From 7ef1f7881a8f660654e7d1567213638b37adbbb5 Mon Sep 17 00:00:00 2001
+From: Stefan Roscher <stefan.roscher at de.ibm.com>
+Date: Wed, 6 Aug 2008 16:27:25 +0200
+Subject: [PATCH] Revert "infiniband: use performance variant for_each_cpu_mask_nr"
+
+This reverts commit 5d7bfd0c4d463d288422032c9903d0452dee141d.
+---
+ drivers/infiniband/hw/ehca/ehca_irq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: ofed_kernel/drivers/infiniband/hw/ehca/ehca_irq.c
+===================================================================
+--- ofed_kernel.orig/drivers/infiniband/hw/ehca/ehca_irq.c
++++ ofed_kernel/drivers/infiniband/hw/ehca/ehca_irq.c
+@@ -650,8 +650,8 @@ static inline int find_next_online_cpu(s
+ ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+
+ spin_lock_irqsave(&pool->last_cpu_lock, flags);
+- cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+- if (cpu >= nr_cpu_ids)
++ cpu = next_cpu(pool->last_cpu, cpu_online_map);
++ if (cpu == NR_CPUS)
+ cpu = first_cpu(cpu_online_map);
+ pool->last_cpu = cpu;
+ spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
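
For readers less familiar with the older cpumask API this last hunk reverts to: next_cpu() returns NR_CPUS when no higher online CPU exists, which is what the restored "if (cpu == NR_CPUS)" wrap-around check relies on; the newer next_cpu_nr()/nr_cpu_ids pair only exists on later kernels. The following is a minimal userspace sketch (not part of the patch, and only an analogue of the kernel helpers) of the round-robin selection pattern used in find_next_online_cpu(). The 64-bit mask, the helper definitions and the NR_CPUS value are illustrative stand-ins for cpumask_t and the real kernel functions, not the driver code itself.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 64

/* next set bit strictly above 'cpu', or NR_CPUS if none (like old next_cpu()) */
static int next_cpu(int cpu, uint64_t online)
{
	int i;

	for (i = cpu + 1; i < NR_CPUS; i++)
		if (online & (1ULL << i))
			return i;
	return NR_CPUS;
}

/* lowest set bit (like first_cpu()) */
static int first_cpu(uint64_t online)
{
	return next_cpu(-1, online);
}

int main(void)
{
	uint64_t online = 0x0F;	/* pretend CPUs 0-3 are online */
	int last_cpu = 3;	/* last CPU handed out by the pool */
	int cpu;

	/* same shape as the reverted hunk: advance, wrap to the first CPU if past the end */
	cpu = next_cpu(last_cpu, online);
	if (cpu == NR_CPUS)
		cpu = first_cpu(online);

	printf("next online cpu after %d: %d\n", last_cpu, cpu);	/* prints 0 */
	return 0;
}

Built with any C compiler, this picks CPU 0 after CPU 3, i.e. the wrap-around behaviour the backport keeps while dropping the next_cpu_nr()/nr_cpu_ids variant that 2.6.16 does not provide.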