[openib-general] [PATCH v2 1/2] ofed_1_2 Changes to kernel_patches/ for Chelsio T3 Support.
Steve Wise
swise at opengridcomputing.com
Thu Jan 11 14:14:54 PST 2007
- genalloc backport.
- qp_num -> qp ptr patch for cxgb3.
Signed-off-by: Steve Wise <swise at opengridcomputing.com>
---
kernel_patches/fixes/cxgb3_wc_qpn_to_qp.patch | 23 +
kernel_patches/fixes/genalloc.patch | 392 +++++++++++++++++++++++++
2 files changed, 415 insertions(+), 0 deletions(-)
diff --git a/kernel_patches/fixes/cxgb3_wc_qpn_to_qp.patch b/kernel_patches/fixes/cxgb3_wc_qpn_to_qp.patch
new file mode 100644
index 0000000..0763f70
--- /dev/null
+++ b/kernel_patches/fixes/cxgb3_wc_qpn_to_qp.patch
@@ -0,0 +1,23 @@
+Update T3 driver: qp_num no longer in ib_wc.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/iwch_cq.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
+index ff09509..122f7b4 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
+@@ -80,7 +80,7 @@ int iwch_poll_cq_one(struct iwch_dev *rh
+ ret = 1;
+
+ wc->wr_id = cookie;
+- wc->qp_num = qhp->wq.qpid;
++ wc->qp = &qhp->ibqp;
+ wc->vendor_err = CQE_STATUS(cqe);
+
+ PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
diff --git a/kernel_patches/fixes/genalloc.patch b/kernel_patches/fixes/genalloc.patch
new file mode 100644
index 0000000..c44a98f
--- /dev/null
+++ b/kernel_patches/fixes/genalloc.patch
@@ -0,0 +1,392 @@
+Backport of the Linux 2.6.20 generic allocator.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/Kconfig | 1
+ drivers/infiniband/hw/cxgb3/Makefile | 3
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.h | 4
+ drivers/infiniband/hw/cxgb3/core/cxio_resource.c | 20 +-
+ drivers/infiniband/hw/cxgb3/core/cxio_resource.h | 2
+ drivers/infiniband/hw/cxgb3/core/genalloc.c | 196 ++++++++++++++++++++++
+ drivers/infiniband/hw/cxgb3/core/genalloc.h | 36 ++++
+ 7 files changed, 247 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
+index d3db264..0361a72 100644
+--- a/drivers/infiniband/hw/cxgb3/Kconfig
++++ b/drivers/infiniband/hw/cxgb3/Kconfig
+@@ -1,7 +1,6 @@
+ config INFINIBAND_CXGB3
+ tristate "Chelsio RDMA Driver"
+ depends on CHELSIO_T3 && INFINIBAND
+- select GENERIC_ALLOCATOR
+ ---help---
+ This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
+ 10GbE adapters.
+diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
+index 7a89f6d..12e7a94 100644
+--- a/drivers/infiniband/hw/cxgb3/Makefile
++++ b/drivers/infiniband/hw/cxgb3/Makefile
+@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/
+ obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
+
+ iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
+- iwch_provider.o iwch.o core/cxio_hal.o core/cxio_resource.o
++ iwch_provider.o iwch.o core/cxio_hal.o core/cxio_resource.o \
++ core/genalloc.o
+
+ ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
+ EXTRA_CFLAGS += -DDEBUG -g
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.h b/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
+index e5e702d..a9e8452 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
+@@ -104,8 +104,8 @@ struct cxio_rdev {
+ u32 qpnr;
+ u32 qpmask;
+ struct cxio_ucontext uctx;
+- struct gen_pool *pbl_pool;
+- struct gen_pool *rqt_pool;
++ struct iwch_gen_pool *pbl_pool;
++ struct iwch_gen_pool *rqt_pool;
+ };
+
+ static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_resource.c b/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
+index d1d8722..cecb27b 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
+@@ -265,7 +265,7 @@ #define PBL_CHUNK 2*1024*1024
+
+ u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
+ {
+- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
++ unsigned long addr = iwch_gen_pool_alloc(rdev_p->pbl_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+ return (u32)addr;
+ }
+@@ -273,24 +273,24 @@ u32 cxio_hal_pblpool_alloc(struct cxio_r
+ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
+ {
+ PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
++ iwch_gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
+ }
+
+ int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
+ {
+ unsigned long i;
+- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
++ rdev_p->pbl_pool = iwch_gen_pool_create(MIN_PBL_SHIFT, -1);
+ if (rdev_p->pbl_pool)
+ for (i = rdev_p->rnic_info.pbl_base;
+ i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
+ i += PBL_CHUNK)
+- gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
++ iwch_gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
+ return rdev_p->pbl_pool ? 0 : -ENOMEM;
+ }
+
+ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
+ {
+- gen_pool_destroy(rdev_p->pbl_pool);
++ iwch_gen_pool_destroy(rdev_p->pbl_pool);
+ }
+
+ /*
+@@ -302,7 +302,7 @@ #define RQT_CHUNK 2*1024*1024
+
+ u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
+ {
+- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
++ unsigned long addr = iwch_gen_pool_alloc(rdev_p->rqt_pool, size << 6);
+ PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+ return (u32)addr;
+ }
+@@ -310,22 +310,22 @@ u32 cxio_hal_rqtpool_alloc(struct cxio_r
+ void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
+ {
+ PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
++ iwch_gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
+ }
+
+ int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
+ {
+ unsigned long i;
+- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
++ rdev_p->rqt_pool = iwch_gen_pool_create(MIN_RQT_SHIFT, -1);
+ if (rdev_p->rqt_pool)
+ for (i = rdev_p->rnic_info.rqt_base;
+ i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
+ i += RQT_CHUNK)
+- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
++ iwch_gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
+ return rdev_p->rqt_pool ? 0 : -ENOMEM;
+ }
+
+ void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
+ {
+- gen_pool_destroy(rdev_p->rqt_pool);
++ iwch_gen_pool_destroy(rdev_p->rqt_pool);
+ }
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_resource.h b/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
+index a6bbe83..06a8076 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
+@@ -39,7 +39,7 @@ #include <linux/slab.h>
+ #include <linux/kfifo.h>
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+-#include <linux/genalloc.h>
++#include "genalloc.h"
+ #include "cxio_hal.h"
+
+ extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
+diff --git a/drivers/infiniband/hw/cxgb3/core/genalloc.c b/drivers/infiniband/hw/cxgb3/core/genalloc.c
+new file mode 100644
+index 0000000..27ba8ec
+--- /dev/null
++++ b/drivers/infiniband/hw/cxgb3/core/genalloc.c
+@@ -0,0 +1,196 @@
++/*
++ * Basic general purpose allocator for managing special purpose memory
++ * not managed by the regular kmalloc/kfree interface.
++ * Uses for this includes on-device special memory, uncached memory
++ * etc.
++ *
++ * Copyright 2005 (C) Jes Sorensen <jes at trained-monkey.org>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2. See the file COPYING for more details.
++ */
++
++#include <linux/module.h>
++#include "genalloc.h"
++
++
++/**
++ * iwch_gen_pool_create - create a new special memory pool
++ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
++ * @nid: node id of the node the pool structure should be allocated on, or -1
++ *
++ * Create a new special memory pool that can be used to manage special purpose
++ * memory not managed by the regular kmalloc/kfree interface.
++ */
++struct iwch_gen_pool *iwch_gen_pool_create(int min_alloc_order, int nid)
++{
++ struct iwch_gen_pool *pool;
++
++ pool = kmalloc_node(sizeof(struct iwch_gen_pool), GFP_KERNEL, nid);
++ if (pool != NULL) {
++ rwlock_init(&pool->lock);
++ INIT_LIST_HEAD(&pool->chunks);
++ pool->min_alloc_order = min_alloc_order;
++ }
++ return pool;
++}
++
++/**
++ * iwch_gen_pool_add - add a new chunk of special memory to the pool
++ * @pool: pool to add new memory chunk to
++ * @addr: starting address of memory chunk to add to pool
++ * @size: size in bytes of the memory chunk to add to pool
++ * @nid: node id of the node the chunk structure and bitmap should be
++ * allocated on, or -1
++ *
++ * Add a new chunk of special memory to the specified pool.
++ */
++int iwch_gen_pool_add(struct iwch_gen_pool *pool, unsigned long addr, size_t size, int nid)
++{
++ struct iwch_gen_pool_chunk *chunk;
++ int nbits = size >> pool->min_alloc_order;
++ int nbytes = sizeof(struct iwch_gen_pool_chunk) +
++ (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
++
++ chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
++ if (unlikely(chunk == NULL))
++ return -1;
++
++ memset(chunk, 0, nbytes);
++ spin_lock_init(&chunk->lock);
++ chunk->start_addr = addr;
++ chunk->end_addr = addr + size;
++
++ write_lock(&pool->lock);
++ list_add(&chunk->next_chunk, &pool->chunks);
++ write_unlock(&pool->lock);
++
++ return 0;
++}
++
++/**
++ * iwch_gen_pool_destroy - destroy a special memory pool
++ * @pool: pool to destroy
++ *
++ * Destroy the specified special memory pool. Verifies that there are no
++ * outstanding allocations.
++ */
++void iwch_gen_pool_destroy(struct iwch_gen_pool *pool)
++{
++ struct list_head *_chunk, *_next_chunk;
++ struct iwch_gen_pool_chunk *chunk;
++ int order = pool->min_alloc_order;
++ int bit, end_bit;
++
++
++ write_lock(&pool->lock);
++ list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
++ chunk = list_entry(_chunk, struct iwch_gen_pool_chunk,
++ next_chunk);
++ list_del(&chunk->next_chunk);
++
++ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
++ bit = find_next_bit(chunk->bits, end_bit, 0);
++ BUG_ON(bit < end_bit);
++
++ kfree(chunk);
++ }
++ kfree(pool);
++ return;
++}
++
++/**
++ * iwch_gen_pool_alloc - allocate special memory from the pool
++ * @pool: pool to allocate from
++ * @size: number of bytes to allocate from the pool
++ *
++ * Allocate the requested number of bytes from the specified pool.
++ * Uses a first-fit algorithm.
++ */
++unsigned long iwch_gen_pool_alloc(struct iwch_gen_pool *pool, size_t size)
++{
++ struct list_head *_chunk;
++ struct iwch_gen_pool_chunk *chunk;
++ unsigned long addr, flags;
++ int order = pool->min_alloc_order;
++ int nbits, bit, start_bit, end_bit;
++
++ if (size == 0)
++ return 0;
++
++ nbits = (size + (1UL << order) - 1) >> order;
++
++ read_lock(&pool->lock);
++ list_for_each(_chunk, &pool->chunks) {
++ chunk = list_entry(_chunk, struct iwch_gen_pool_chunk,
++ next_chunk);
++
++ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
++ end_bit -= nbits + 1;
++
++ spin_lock_irqsave(&chunk->lock, flags);
++ bit = -1;
++ while (bit + 1 < end_bit) {
++ bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
++ if (bit >= end_bit)
++ break;
++
++ start_bit = bit;
++ if (nbits > 1) {
++ bit = find_next_bit(chunk->bits, bit + nbits,
++ bit + 1);
++ if (bit - start_bit < nbits)
++ continue;
++ }
++
++ addr = chunk->start_addr +
++ ((unsigned long)start_bit << order);
++ while (nbits--)
++ __set_bit(start_bit++, &chunk->bits);
++ spin_unlock_irqrestore(&chunk->lock, flags);
++ read_unlock(&pool->lock);
++ return addr;
++ }
++ spin_unlock_irqrestore(&chunk->lock, flags);
++ }
++ read_unlock(&pool->lock);
++ return 0;
++}
++
++/**
++ * iwch_gen_pool_free - free allocated special memory back to the pool
++ * @pool: pool to free to
++ * @addr: starting address of memory to free back to pool
++ * @size: size in bytes of memory to free
++ *
++ * Free previously allocated special memory back to the specified pool.
++ */
++void iwch_gen_pool_free(struct iwch_gen_pool *pool, unsigned long addr,
++ size_t size)
++{
++ struct list_head *_chunk;
++ struct iwch_gen_pool_chunk *chunk;
++ unsigned long flags;
++ int order = pool->min_alloc_order;
++ int bit, nbits;
++
++ nbits = (size + (1UL << order) - 1) >> order;
++
++ read_lock(&pool->lock);
++ list_for_each(_chunk, &pool->chunks) {
++ chunk = list_entry(_chunk, struct iwch_gen_pool_chunk,
++ next_chunk);
++
++ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
++ BUG_ON(addr + size > chunk->end_addr);
++ spin_lock_irqsave(&chunk->lock, flags);
++ bit = (addr - chunk->start_addr) >> order;
++ while (nbits--)
++ __clear_bit(bit++, &chunk->bits);
++ spin_unlock_irqrestore(&chunk->lock, flags);
++ break;
++ }
++ }
++ BUG_ON(nbits > 0);
++ read_unlock(&pool->lock);
++}
+diff --git a/drivers/infiniband/hw/cxgb3/core/genalloc.h b/drivers/infiniband/hw/cxgb3/core/genalloc.h
+new file mode 100644
+index 0000000..1dc336c
+--- /dev/null
++++ b/drivers/infiniband/hw/cxgb3/core/genalloc.h
+@@ -0,0 +1,36 @@
++/*
++ * Basic general purpose allocator for managing special purpose memory
++ * not managed by the regular kmalloc/kfree interface.
++ * Uses for this includes on-device special memory, uncached memory
++ * etc.
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2. See the file COPYING for more details.
++ */
++
++
++/*
++ * General purpose special memory pool descriptor.
++ */
++struct iwch_gen_pool {
++ rwlock_t lock;
++ struct list_head chunks; /* list of chunks in this pool */
++ int min_alloc_order; /* minimum allocation order */
++};
++
++/*
++ * General purpose special memory pool chunk descriptor.
++ */
++struct iwch_gen_pool_chunk {
++ spinlock_t lock;
++ struct list_head next_chunk; /* next chunk in pool */
++ unsigned long start_addr; /* starting address of memory chunk */
++ unsigned long end_addr; /* ending address of memory chunk */
++ unsigned long bits[0]; /* bitmap for allocating memory chunk */
++};
++
++extern struct iwch_gen_pool *iwch_gen_pool_create(int, int);
++extern int iwch_gen_pool_add(struct iwch_gen_pool *, unsigned long, size_t, int);
++extern void iwch_gen_pool_destroy(struct iwch_gen_pool *);
++extern unsigned long iwch_gen_pool_alloc(struct iwch_gen_pool *, size_t);
++extern void iwch_gen_pool_free(struct iwch_gen_pool *, unsigned long, size_t);
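For reviewers unfamiliar with the generic allocator, below is a minimal usage
sketch of the iwch_gen_pool API added above, following the same
create/add/alloc/free pattern as the cxio_hal_pblpool_* changes in this patch.
The shift, base address, and size values are illustrative only and are not
taken from the driver.

  #include <linux/errno.h>
  #include "genalloc.h"

  #define EXAMPLE_POOL_SHIFT 8           /* illustrative: 256-byte allocation units */
  #define EXAMPLE_POOL_BASE  0x10000UL   /* illustrative adapter-memory base */
  #define EXAMPLE_POOL_SIZE  (2 * 1024 * 1024)

  static struct iwch_gen_pool *example_pool;

  static int example_pool_create(void)
  {
          /* Pool whose bitmap tracks 256-byte units; -1 = no NUMA node preference. */
          example_pool = iwch_gen_pool_create(EXAMPLE_POOL_SHIFT, -1);
          if (!example_pool)
                  return -ENOMEM;

          /* Hand the pool one contiguous chunk of device memory to manage. */
          if (iwch_gen_pool_add(example_pool, EXAMPLE_POOL_BASE,
                                EXAMPLE_POOL_SIZE, -1)) {
                  iwch_gen_pool_destroy(example_pool);
                  return -ENOMEM;
          }
          return 0;
  }

  static void example_pool_use(void)
  {
          /* First-fit allocation of 1KB from the chunk added above. */
          unsigned long addr = iwch_gen_pool_alloc(example_pool, 1024);

          if (addr)
                  iwch_gen_pool_free(example_pool, addr, 1024);
  }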