[ofa-general] [PATCH ofed_1_2] iw_cxgb3: Reserve the pages of dma coherent memory for older kernels.

Steve Wise swise at opengridcomputing.com
Mon Mar 19 12:13:51 PDT 2007


Hey Vlad,

This change, along with a libcxgb3 fix, resolves bug 353.
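
For background: kernels before 2.6.15 only treat PageReserved pages as
safe to hand out to userspace. A page mapped with remap_pfn_range()
without that flag gets normal reference counting when the mapping is
torn down at munmap/exit, so the coherent queue memory can be released
out from under the hardware. A minimal sketch of the pattern the
patches below add (hypothetical helpers and a hypothetical 'dev'/'len',
not the driver code itself):

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	/* Allocate coherent memory that may later be mmap'd by userspace. */
	static void *alloc_mappable_coherent(struct device *dev, int len,
					     dma_addr_t *dma)
	{
		void *buf, *p;

		len = PAGE_ALIGN(len);	/* walk whole pages below */
		buf = dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
		if (!buf)
			return NULL;
		/* Mark every page reserved so the pre-2.6.15 VM leaves it alone. */
		for (p = buf; p < buf + len; p += PAGE_SIZE)
			SetPageReserved(virt_to_page(p));
		return buf;
	}

	static void free_mappable_coherent(struct device *dev, int len,
					   void *buf, dma_addr_t dma)
	{
		void *p;

		len = PAGE_ALIGN(len);
		/* Undo the reservation before handing the pages back. */
		for (p = buf; p < buf + len; p += PAGE_SIZE)
			ClearPageReserved(virt_to_page(p));
		dma_free_coherent(dev, len, buf, dma);
	}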

You can pull this ofed_1_2 change directly from:

git://staging.openfabrics.org/~swise/ofed_1_2 ofed_1_2

Thanks,

Steve.


---------------------------

Reserve the pages of dma coherent memory for older kernels.

Signed-off-by: Steve Wise <swise at opengridcomputing.com>
---

 .../2.6.5_sles9_sp3/cxio_hal_to_2.6.14.patch       |  127 +++++++++++++++++++++++
 .../backport/2.6.9_U2/cxio_hal_to_2.6.14.patch     |  127 +++++++++++++++++++++++
 .../2.6.9_U2/iwch_provider_to_2.6.9_U4.patch       |   16 +++
 .../backport/2.6.9_U3/cxio_hal_to_2.6.14.patch     |  127 +++++++++++++++++++++++
 .../2.6.9_U3/iwch_provider_to_2.6.9_U4.patch       |   16 +++
 .../backport/2.6.9_U4/cxio_hal_to_2.6.14.patch     |  127 +++++++++++++++++++++++
 6 files changed, 540 insertions(+), 0 deletions(-)
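
For context, this is roughly how the reserved queue memory reaches
userspace. The sketch below is a hypothetical mmap handler under
assumed bookkeeping (example_buf, example_mmap are illustrative names,
not iwch_mmap itself): remap_pfn_range() inserts the PTEs, and the
PageReserved marking from the allocation side is what keeps the
pre-2.6.15 VM from applying ordinary page accounting to them at
teardown.

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <asm/io.h>

	struct example_buf {		/* hypothetical bookkeeping */
		void *vaddr;		/* from dma_alloc_coherent() */
		unsigned long size;	/* PAGE_ALIGN'd length */
	};

	static int example_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct example_buf *b = filp->private_data;
		unsigned long len = vma->vm_end - vma->vm_start;

		if (len > b->size)	/* don't map past the buffer */
			return -EINVAL;
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * Use the CPU physical address of the buffer, which can
		 * differ from the bus address dma_alloc_coherent() returned.
		 */
		return remap_pfn_range(vma, vma->vm_start,
				       virt_to_phys(b->vaddr) >> PAGE_SHIFT,
				       len, vma->vm_page_prot);
	}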

diff --git a/kernel_patches/backport/2.6.5_sles9_sp3/cxio_hal_to_2.6.14.patch b/kernel_patches/backport/2.6.5_sles9_sp3/cxio_hal_to_2.6.14.patch
new file mode 100644
index 0000000..34556bb
--- /dev/null
+++ b/kernel_patches/backport/2.6.5_sles9_sp3/cxio_hal_to_2.6.14.patch
@@ -0,0 +1,127 @@
+Reserve pages to support userspace mapping in older kernels.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+This is needed for kernels prior to 2.6.15 to correctly map kernel
+memory into userspace.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.c |   53 +++++++++++++++++++--------
+ 1 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+index 229edd5..067fe46 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+@@ -170,10 +170,30 @@ int cxio_hal_clear_qp_ctx(struct cxio_rd
+ 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+ }
+ 
++static void reserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		SetPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
++static void unreserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		ClearPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
+ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	struct rdma_cq_setup setup;
+-	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
+ 
+ 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
+ 	if (!cq->cqid)
+@@ -181,16 +201,15 @@ int cxio_create_cq(struct cxio_rdev *rde
+ 	cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ 	if (!cq->sw_queue)
+ 		return -ENOMEM;
+-	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     (1UL << (cq->size_log2)) *
+-					     sizeof(struct t3_cqe),
+-					     &(cq->dma_addr), GFP_KERNEL);
++	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(cq->dma_addr), GFP_KERNEL);
+ 	if (!cq->queue) {
+ 		kfree(cq->sw_queue);
+ 		return -ENOMEM;
+ 	}
+ 	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ 	memset(cq->queue, 0, size);
++	reserve_pages(cq->queue, size);
+ 	setup.id = cq->cqid;
+ 	setup.base_addr = (u64) (cq->dma_addr);
+ 	setup.size = 1UL << cq->size_log2;
+@@ -288,6 +307,7 @@ int cxio_create_qp(struct cxio_rdev *rde
+ {
+ 	int depth = 1UL << wq->size_log2;
+ 	int rqsize = 1UL << wq->rq_size_log2;
++	int size = PAGE_ALIGN(depth * sizeof(union t3_wr));
+ 
+ 	wq->qpid = get_qpid(rdev_p, uctx);
+ 	if (!wq->qpid)
+@@ -305,14 +325,15 @@ int cxio_create_qp(struct cxio_rdev *rde
+ 	if (!wq->sq)
+ 		goto err3;
+ 	
+-	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     depth * sizeof(union t3_wr),
+-					     &(wq->dma_addr), GFP_KERNEL);
++	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(wq->dma_addr), GFP_KERNEL);
+ 	if (!wq->queue)
+ 		goto err4;
+ 
+-	memset(wq->queue, 0, depth * sizeof(union t3_wr));
+ 	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
++	memset(wq->queue, 0, size);
++	reserve_pages(wq->queue, size);
++
+ 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
+ 	if (!kernel_domain)
+ 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
+@@ -334,11 +355,12 @@ err1:
+ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	int err;
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
++
+ 	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
+ 	kfree(cq->sw_queue);
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (cq->size_log2))
+-			  * sizeof(struct t3_cqe), cq->queue,
++	unreserve_pages(cq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, cq->queue,
+ 			  pci_unmap_addr(cq, mapping));
+ 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
+ 	return err;
+@@ -347,9 +369,10 @@ int cxio_destroy_cq(struct cxio_rdev *rd
+ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
+ 		    struct cxio_ucontext *uctx)
+ {
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (wq->size_log2))
+-			  * sizeof(union t3_wr), wq->queue,
++	int size = PAGE_ALIGN((1UL << (wq->size_log2)) * sizeof(union t3_wr));
++
++	unreserve_pages(wq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, wq->queue,
+ 			  pci_unmap_addr(wq, mapping));
+ 	kfree(wq->sq);
+ 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
diff --git a/kernel_patches/backport/2.6.9_U2/cxio_hal_to_2.6.14.patch b/kernel_patches/backport/2.6.9_U2/cxio_hal_to_2.6.14.patch
new file mode 100644
index 0000000..34556bb
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U2/cxio_hal_to_2.6.14.patch
@@ -0,0 +1,127 @@
+Reserve pages to support userspace mapping in older kernels.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+This is needed for kernels prior to 2.6.15 to correctly map kernel
+memory into userspace.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.c |   53 +++++++++++++++++++--------
+ 1 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+index 229edd5..067fe46 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+@@ -170,10 +170,30 @@ int cxio_hal_clear_qp_ctx(struct cxio_rd
+ 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+ }
+ 
++static void reserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		SetPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
++static void unreserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		ClearPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
+ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	struct rdma_cq_setup setup;
+-	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
+ 
+ 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
+ 	if (!cq->cqid)
+@@ -181,16 +201,15 @@ int cxio_create_cq(struct cxio_rdev *rde
+ 	cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ 	if (!cq->sw_queue)
+ 		return -ENOMEM;
+-	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     (1UL << (cq->size_log2)) *
+-					     sizeof(struct t3_cqe),
+-					     &(cq->dma_addr), GFP_KERNEL);
++	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(cq->dma_addr), GFP_KERNEL);
+ 	if (!cq->queue) {
+ 		kfree(cq->sw_queue);
+ 		return -ENOMEM;
+ 	}
+ 	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ 	memset(cq->queue, 0, size);
++	reserve_pages(cq->queue, size);
+ 	setup.id = cq->cqid;
+ 	setup.base_addr = (u64) (cq->dma_addr);
+ 	setup.size = 1UL << cq->size_log2;
+@@ -288,6 +307,7 @@ int cxio_create_qp(struct cxio_rdev *rde
+ {
+ 	int depth = 1UL << wq->size_log2;
+ 	int rqsize = 1UL << wq->rq_size_log2;
++	int size = PAGE_ALIGN(depth * sizeof(union t3_wr));
+ 
+ 	wq->qpid = get_qpid(rdev_p, uctx);
+ 	if (!wq->qpid)
+@@ -305,14 +325,15 @@ int cxio_create_qp(struct cxio_rdev *rde
+ 	if (!wq->sq)
+ 		goto err3;
+ 	
+-	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     depth * sizeof(union t3_wr),
+-					     &(wq->dma_addr), GFP_KERNEL);
++	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(wq->dma_addr), GFP_KERNEL);
+ 	if (!wq->queue)
+ 		goto err4;
+ 
+-	memset(wq->queue, 0, depth * sizeof(union t3_wr));
+ 	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
++	memset(wq->queue, 0, size);
++	reserve_pages(wq->queue, size);
++
+ 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
+ 	if (!kernel_domain)
+ 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
+@@ -334,11 +355,12 @@ err1:
+ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	int err;
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
++
+ 	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
+ 	kfree(cq->sw_queue);
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (cq->size_log2))
+-			  * sizeof(struct t3_cqe), cq->queue,
++	unreserve_pages(cq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, cq->queue,
+ 			  pci_unmap_addr(cq, mapping));
+ 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
+ 	return err;
+@@ -347,9 +369,10 @@ int cxio_destroy_cq(struct cxio_rdev *rd
+ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
+ 		    struct cxio_ucontext *uctx)
+ {
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (wq->size_log2))
+-			  * sizeof(union t3_wr), wq->queue,
++	int size = PAGE_ALIGN((1UL << (wq->size_log2)) * sizeof(union t3_wr));
++
++	unreserve_pages(wq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, wq->queue,
+ 			  pci_unmap_addr(wq, mapping));
+ 	kfree(wq->sq);
+ 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
diff --git a/kernel_patches/backport/2.6.9_U2/iwch_provider_to_2.6.9_U4.patch b/kernel_patches/backport/2.6.9_U2/iwch_provider_to_2.6.9_U4.patch
new file mode 100644
index 0000000..1fbc717
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U2/iwch_provider_to_2.6.9_U4.patch
@@ -0,0 +1,16 @@
+--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c	2007-01-17 09:22:39.000000000 -0600
++++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c	2007-01-22 17:46:16.000000000 -0600
+@@ -337,13 +337,6 @@ static int iwch_mmap(struct ib_ucontext 
+ 	    (pgaddr < (rdev_p->rnic_info.udbell_physbase +
+ 		       rdev_p->rnic_info.udbell_len))) {
+ 
+-		/*
+-		 * Map T3 DB register.
+-		 */
+-		if (vma->vm_flags & VM_READ) {
+-                	return -EPERM;
+-		}
+-
+ 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ 		vma->vm_flags &= ~VM_MAYREAD;
diff --git a/kernel_patches/backport/2.6.9_U3/cxio_hal_to_2.6.14.patch b/kernel_patches/backport/2.6.9_U3/cxio_hal_to_2.6.14.patch
new file mode 100644
index 0000000..34556bb
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U3/cxio_hal_to_2.6.14.patch
@@ -0,0 +1,127 @@
+Reserve pages to support userspace mapping in older kernels.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+This is needed for kernels prior to 2.6.15 to correctly map kernel
+memory into userspace.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.c |   53 +++++++++++++++++++--------
+ 1 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+index 229edd5..067fe46 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+@@ -170,10 +170,30 @@ int cxio_hal_clear_qp_ctx(struct cxio_rd
+ 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+ }
+ 
++static void reserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		SetPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
++static void unreserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		ClearPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
+ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	struct rdma_cq_setup setup;
+-	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
+ 
+ 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
+ 	if (!cq->cqid)
+@@ -181,16 +201,15 @@ int cxio_create_cq(struct cxio_rdev *rde
+ 	cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ 	if (!cq->sw_queue)
+ 		return -ENOMEM;
+-	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     (1UL << (cq->size_log2)) *
+-					     sizeof(struct t3_cqe),
+-					     &(cq->dma_addr), GFP_KERNEL);
++	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(cq->dma_addr), GFP_KERNEL);
+ 	if (!cq->queue) {
+ 		kfree(cq->sw_queue);
+ 		return -ENOMEM;
+ 	}
+ 	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ 	memset(cq->queue, 0, size);
++	reserve_pages(cq->queue, size);
+ 	setup.id = cq->cqid;
+ 	setup.base_addr = (u64) (cq->dma_addr);
+ 	setup.size = 1UL << cq->size_log2;
+@@ -288,6 +307,7 @@ int cxio_create_qp(struct cxio_rdev *rde
+ {
+ 	int depth = 1UL << wq->size_log2;
+ 	int rqsize = 1UL << wq->rq_size_log2;
++	int size = PAGE_ALIGN(depth * sizeof(union t3_wr));
+ 
+ 	wq->qpid = get_qpid(rdev_p, uctx);
+ 	if (!wq->qpid)
+@@ -305,14 +325,15 @@ int cxio_create_qp(struct cxio_rdev *rde
+ 	if (!wq->sq)
+ 		goto err3;
+ 	
+-	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     depth * sizeof(union t3_wr),
+-					     &(wq->dma_addr), GFP_KERNEL);
++	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(wq->dma_addr), GFP_KERNEL);
+ 	if (!wq->queue)
+ 		goto err4;
+ 
+-	memset(wq->queue, 0, depth * sizeof(union t3_wr));
+ 	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
++	memset(wq->queue, 0, size);
++	reserve_pages(wq->queue, size);
++
+ 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
+ 	if (!kernel_domain)
+ 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
+@@ -334,11 +355,12 @@ err1:
+ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	int err;
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
++
+ 	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
+ 	kfree(cq->sw_queue);
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (cq->size_log2))
+-			  * sizeof(struct t3_cqe), cq->queue,
++	unreserve_pages(cq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, cq->queue,
+ 			  pci_unmap_addr(cq, mapping));
+ 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
+ 	return err;
+@@ -347,9 +369,10 @@ int cxio_destroy_cq(struct cxio_rdev *rd
+ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
+ 		    struct cxio_ucontext *uctx)
+ {
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (wq->size_log2))
+-			  * sizeof(union t3_wr), wq->queue,
++	int size = PAGE_ALIGN((1UL << (wq->size_log2)) * sizeof(union t3_wr));
++
++	unreserve_pages(wq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, wq->queue,
+ 			  pci_unmap_addr(wq, mapping));
+ 	kfree(wq->sq);
+ 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
diff --git a/kernel_patches/backport/2.6.9_U3/iwch_provider_to_2.6.9_U4.patch b/kernel_patches/backport/2.6.9_U3/iwch_provider_to_2.6.9_U4.patch
new file mode 100644
index 0000000..1fbc717
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U3/iwch_provider_to_2.6.9_U4.patch
@@ -0,0 +1,16 @@
+--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c	2007-01-17 09:22:39.000000000 -0600
++++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c	2007-01-22 17:46:16.000000000 -0600
+@@ -337,13 +337,6 @@ static int iwch_mmap(struct ib_ucontext 
+ 	    (pgaddr < (rdev_p->rnic_info.udbell_physbase +
+ 		       rdev_p->rnic_info.udbell_len))) {
+ 
+-		/*
+-		 * Map T3 DB register.
+-		 */
+-		if (vma->vm_flags & VM_READ) {
+-                	return -EPERM;
+-		}
+-
+ 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ 		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ 		vma->vm_flags &= ~VM_MAYREAD;
diff --git a/kernel_patches/backport/2.6.9_U4/cxio_hal_to_2.6.14.patch b/kernel_patches/backport/2.6.9_U4/cxio_hal_to_2.6.14.patch
new file mode 100644
index 0000000..34556bb
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/cxio_hal_to_2.6.14.patch
@@ -0,0 +1,127 @@
+Reserve pages to support userspace mapping in older kernels.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+This is needed for kernels prior to 2.6.15 to correctly map kernel
+memory into userspace.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.c |   53 +++++++++++++++++++--------
+ 1 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+index 229edd5..067fe46 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.c
+@@ -170,10 +170,30 @@ int cxio_hal_clear_qp_ctx(struct cxio_rd
+ 	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+ }
+ 
++static void reserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		SetPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
++static void unreserve_pages(void *p, int size)
++{
++	while (size > 0) {
++		ClearPageReserved(virt_to_page(p));
++		p += PAGE_SIZE;
++		size -= PAGE_SIZE;
++	}
++	BUG_ON(size < 0);
++}
++
+ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	struct rdma_cq_setup setup;
+-	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
+ 
+ 	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
+ 	if (!cq->cqid)
+@@ -181,16 +201,15 @@ int cxio_create_cq(struct cxio_rdev *rde
+ 	cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ 	if (!cq->sw_queue)
+ 		return -ENOMEM;
+-	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     (1UL << (cq->size_log2)) *
+-					     sizeof(struct t3_cqe),
+-					     &(cq->dma_addr), GFP_KERNEL);
++	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(cq->dma_addr), GFP_KERNEL);
+ 	if (!cq->queue) {
+ 		kfree(cq->sw_queue);
+ 		return -ENOMEM;
+ 	}
+ 	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ 	memset(cq->queue, 0, size);
++	reserve_pages(cq->queue, size);
+ 	setup.id = cq->cqid;
+ 	setup.base_addr = (u64) (cq->dma_addr);
+ 	setup.size = 1UL << cq->size_log2;
+@@ -288,6 +307,7 @@ int cxio_create_qp(struct cxio_rdev *rde
+ {
+ 	int depth = 1UL << wq->size_log2;
+ 	int rqsize = 1UL << wq->rq_size_log2;
++	int size = PAGE_ALIGN(depth * sizeof(union t3_wr));
+ 
+ 	wq->qpid = get_qpid(rdev_p, uctx);
+ 	if (!wq->qpid)
+@@ -305,14 +325,15 @@ int cxio_create_qp(struct cxio_rdev *rde
+ 	if (!wq->sq)
+ 		goto err3;
+ 	
+-	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+-					     depth * sizeof(union t3_wr),
+-					     &(wq->dma_addr), GFP_KERNEL);
++	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
++				       &(wq->dma_addr), GFP_KERNEL);
+ 	if (!wq->queue)
+ 		goto err4;
+ 
+-	memset(wq->queue, 0, depth * sizeof(union t3_wr));
+ 	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
++	memset(wq->queue, 0, size);
++	reserve_pages(wq->queue, size);
++
+ 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
+ 	if (!kernel_domain)
+ 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
+@@ -334,11 +355,12 @@ err1:
+ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+ {
+ 	int err;
++	int size = PAGE_ALIGN((1UL << (cq->size_log2)) * sizeof(struct t3_cqe));
++
+ 	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
+ 	kfree(cq->sw_queue);
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (cq->size_log2))
+-			  * sizeof(struct t3_cqe), cq->queue,
++	unreserve_pages(cq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, cq->queue,
+ 			  pci_unmap_addr(cq, mapping));
+ 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
+ 	return err;
+@@ -347,9 +369,10 @@ int cxio_destroy_cq(struct cxio_rdev *rd
+ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
+ 		    struct cxio_ucontext *uctx)
+ {
+-	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
+-			  (1UL << (wq->size_log2))
+-			  * sizeof(union t3_wr), wq->queue,
++	int size = PAGE_ALIGN((1UL << (wq->size_log2)) * sizeof(union t3_wr));
++
++	unreserve_pages(wq->queue, size);
++	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), size, wq->queue,
+ 			  pci_unmap_addr(wq, mapping));
+ 	kfree(wq->sq);
+ 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));



