[ofa-general] [PATCH 9/10] nes: support systems with other than 4k page size
Glenn Grundstrom NetEffect
glenn at lists.openfabrics.org
Thu Dec 20 12:37:49 PST 2007
The driver originally assumed a 4 KB page size. Get the page size from
the kernel and map the memory accordingly.
Signed-off-by: Glenn Grundstrom <ggrundstrom at neteffect.com>
---
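Note (not part of the patch): a minimal sketch of the PD id conversion the
nes_alloc_pd()/nes_dealloc_pd() hunks rely on, assuming PAGE_SHIFT >= 12
(the kernel page size is a power-of-two multiple of the 4 KB pages the
NE020 hardware uses per protection domain). The helper names below are
illustrative only and do not exist in the driver.

#include <linux/types.h>
#include <asm/page.h>

#define NES_HW_PD_SHIFT 12	/* hardware PDs are always 4 KB apart */

/* Each allocator slot covers one kernel page worth of hardware PDs. */
static inline u32 nes_pd_num_to_pd_id(u32 pd_num, u32 base_pd)
{
	return (pd_num << (PAGE_SHIFT - NES_HW_PD_SHIFT)) + base_pd;
}

/* Inverse mapping, used when returning the slot to the allocator. */
static inline u32 nes_pd_id_to_pd_num(u32 pd_id, u32 base_pd)
{
	return (pd_id - base_pd) >> (PAGE_SHIFT - NES_HW_PD_SHIFT);
}

With 64 KB pages (PAGE_SHIFT == 16), pd_num 1 maps to pd_id base_pd + 16,
matching the (PAGE_SHIFT-12) shifts in the nes_alloc_pd() and
nes_dealloc_pd() changes below.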
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 515133d..5646e5a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -259,7 +259,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
}
/* no adapter found */
- num_pds = pci_resource_len(nesdev->pcidev, BAR_1) / 4096;
+ num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT;
if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) {
nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n",
hw_rev);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9751d0d..aa53cbd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -839,7 +839,7 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
return ERR_PTR(-ENOMEM);
nes_ucontext->nesdev = nesdev;
- nes_ucontext->mmap_wq_offset = ((uresp.max_pds * 4096) + PAGE_SIZE-1) / PAGE_SIZE;
+ nes_ucontext->mmap_wq_offset = uresp.max_pds;
nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
PAGE_SIZE;
@@ -960,7 +960,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
nespd, nesvnic->nesibdev->ibdev.name);
- nespd->pd_id = pd_num + nesadapter->base_pd;
+ nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
if (context) {
nesucontext = to_nesucontext(context);
@@ -1018,7 +1018,7 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n",
nespd->pd_id, nespd);
nes_free_resource(nesadapter, nesadapter->allocated_pds,
- nespd->pd_id-nesadapter->base_pd);
+ (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
kfree(nespd);
return 0;
@@ -1089,8 +1089,8 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
pbl = nespbl->pbl_vbase; /* points to first pbl entry */
/* now lets set the sq_vbase as well as rq_vbase addrs we will assign */
/* the first pbl to be fro the rq_vbase... */
- rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> PAGE_SHIFT;
- sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> PAGE_SHIFT;
+ rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
+ sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
nesqp->hwqp.sq_pbase = (le32_to_cpu (((u32 *)pbl)[0]) ) | ((u64)((le32_to_cpu (((u32 *)pbl)[1]))) << 32);
if (!nespbl->page) {
nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
@@ -2433,6 +2433,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u32 driver_key;
u32 root_pbl_index = 0;
u32 cur_pbl_index = 0;
+ u32 skip_pages;
u16 pbl_count;
u8 single_page = 1;
u8 stag_key;
@@ -2442,8 +2443,12 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return (struct ib_mr *)region;
}
- nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u\n",
- (unsigned long int)start, (unsigned long int)virt, (u32)length);
+ nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
+ " offset = %u, page size = %u.\n",
+ (unsigned long int)start, (unsigned long int)virt, (u32)length,
+ region->offset, region->page_size);
+
+ skip_pages = ((u32)region->offset) >> 12;
if (ib_copy_from_udata(&req, udata, sizeof(req)))
return ERR_PTR(-EFAULT);
@@ -2460,7 +2465,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
get_random_bytes(&next_stag_index, sizeof(next_stag_index));
stag_key = (u8)next_stag_index;
- driver_key = 0;
+ driver_key = next_stag_index & 0x70000000;
next_stag_index >>= 8;
next_stag_index %= nesadapter->max_mr;
@@ -2484,70 +2489,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
chunk->nents, chunk->nmap);
for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
- if ((page_count&0x01FF) == 0) {
- if (page_count>(1024*512)) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter,
- nesadapter->allocated_mrs, stag_index);
- kfree(nesmr);
- return ERR_PTR(-E2BIG);
- }
- if (root_pbl_index == 1) {
- root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
- 8192, &root_vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
- root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
- if (!root_vpbl.pbl_vbase) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- return ERR_PTR(-ENOMEM);
- }
- root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
- GFP_KERNEL);
- if (!root_vpbl.leaf_vpbl) {
- ib_umem_release(region);
- pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
- root_vpbl.pbl_pbase);
- pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
- vpbl.pbl_pbase);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs,
- stag_index);
- kfree(nesmr);
- return ERR_PTR(-ENOMEM);
- }
- root_vpbl.pbl_vbase[0].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[0].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
- root_vpbl.leaf_vpbl[0] = vpbl;
- }
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
- nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
- vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
- if (!vpbl.pbl_vbase) {
- ib_umem_release(region);
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
- ibmr = ERR_PTR(-ENOMEM);
- kfree(nesmr);
- goto reg_user_mr_err;
- }
- if (1 <= root_pbl_index) {
- root_vpbl.pbl_vbase[root_pbl_index].pa_low =
- cpu_to_le32((u32)vpbl.pbl_pbase);
- root_vpbl.pbl_vbase[root_pbl_index].pa_high =
- cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
- }
- root_pbl_index++;
- cur_pbl_index = 0;
- }
if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
ib_umem_release(region);
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
@@ -2569,35 +2510,106 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
region_length += sg_dma_len(&chunk->page_list[nmap_index]);
- chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> PAGE_SHIFT;
- for (page_index=0; page_index < chunk_pages; page_index++) {
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ region_length -= skip_pages << 12;
+ for (page_index=skip_pages; page_index < chunk_pages; page_index++) {
+ skip_pages = 0;
+ if ((page_count!=0)&&(page_count<<12)-(region->offset&(4096-1))>=region->length)
+ goto enough_pages;
+ if ((page_count&0x01FF) == 0) {
+ if (page_count>(1024*512)) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter,
+ nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ nesmr = ERR_PTR(-E2BIG);
+ goto reg_user_mr_err;
+ }
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+ 8192, &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ nesmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
+ GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ nesmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.pbl_vbase[0].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
+ }
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
if (single_page) {
if (page_count != 0) {
- if ((last_dma_addr+PAGE_SIZE) !=
+ if ((last_dma_addr+4096) !=
(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE)))
+ (page_index*4096)))
single_page = 0;
last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE);
+ (page_index*4096);
} else {
first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE);
+ (page_index*4096);
last_dma_addr = first_dma_addr;
}
}
vpbl.pbl_vbase[cur_pbl_index].pa_low =
cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE)));
+ (page_index*4096)));
vpbl.pbl_vbase[cur_pbl_index].pa_high =
cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE))) >> 32)));
+ (page_index*4096))) >> 32)));
cur_pbl_index++;
page_count++;
}
}
}
-
+ enough_pages:
nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
" stag_key=0x%08x\n",
stag_index, driver_key, stag_key);
@@ -2609,12 +2621,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
iova_start = virt;
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
- " index = 0x%08X, region->length=0x%08llx\n",
- stag, (unsigned int)iova_start,
- (unsigned int)region_length, stag_index,
- (unsigned long long)region->length);
-
/* Make the leaf PBL the root if only one PBL */
if (root_pbl_index == 1) {
root_vpbl.pbl_pbase = vpbl.pbl_pbase;
@@ -2626,6 +2632,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
pbl_count = root_pbl_index;
first_dma_addr = 0;
}
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
+ " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
+ stag, (unsigned int)iova_start,
+ (unsigned int)region_length, stag_index,
+ (unsigned long long)region->length, pbl_count);
ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
@@ -2684,8 +2695,8 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
nesmr->region = region;
nes_ucontext = to_nesucontext(pd->uobject->context);
- pbl_depth = region->length >> PAGE_SHIFT;
- pbl_depth += (region->length & ~PAGE_MASK) ? 1 : 0;
+ pbl_depth = region->length >> 12;
+ pbl_depth += (region->length & (4096-1)) ? 1 : 0;
nespbl->pbl_size = pbl_depth*sizeof(u64);
if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
@@ -2714,15 +2725,16 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
list_for_each_entry(chunk, &region->chunk_list, list) {
for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
- chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> PAGE_SHIFT;
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
nespbl->page = sg_page(&chunk->page_list[0]);
for (page_index=0; page_index<chunk_pages; page_index++) {
((u32 *)pbl)[0] = cpu_to_le32((u32)
(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE)));
+ (page_index*4096)));
((u32 *)pbl)[1] = cpu_to_le32(((u64)
(sg_dma_address(&chunk->page_list[nmap_index])+
- (page_index*PAGE_SIZE)))>>32);
+ (page_index*4096)))>>32);
nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
(unsigned long long)*pbl,
le32_to_cpu(((u32 *)pbl)[1]), le32_to_cpu(((u32 *)pbl)[0]));