[openib-general] [PATCH 1/2] ofed_1_2 Changes to kernel_patches/ for Chelsio T3 Support.

Steve Wise swise at opengridcomputing.com
Mon Jan 8 11:13:48 PST 2007


- RDMA core changes needed for T3 support.
- Backport of the Linux 2.6.20 generic allocator (genalloc); a usage
  sketch follows the diffstat below.
- Modified the qp_num -> qp pointer patch to include cxgb3.

Signed-off-by: Steve Wise <swise at opengridcomputing.com>
---

 kernel_patches/fixes/genalloc.patch        |  392 ++++++++++++++++++++++++++++
 kernel_patches/fixes/ib_wc_qpn_to_qp.patch |   13 +
 kernel_patches/fixes/t3_core_changes.patch |  202 ++++++++++++++
 3 files changed, 607 insertions(+), 0 deletions(-)
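
For reviewers unfamiliar with the backported allocator, here is a minimal
usage sketch modeled on the cxio_hal_pblpool_*() hunks below.  The
example_* names, EXAMPLE_MIN_SHIFT, and EXAMPLE_CHUNK are placeholders for
illustration only; they are not part of the patch.

  /*
   * Sketch: managing a range of adapter memory with the backported pool
   * API, assuming "genalloc.h" from the new file added below.
   */
  #include <linux/errno.h>
  #include "genalloc.h"

  #define EXAMPLE_MIN_SHIFT 8			/* 1 bitmap bit per 256 bytes */
  #define EXAMPLE_CHUNK	    (2*1024*1024)	/* add memory in 2MB chunks */

  static struct iwch_gen_pool *example_pool;

  static int example_pool_create(unsigned long base, unsigned long top)
  {
  	unsigned long i;

  	/* One bit per 2^EXAMPLE_MIN_SHIFT bytes; nid of -1 = any node. */
  	example_pool = iwch_gen_pool_create(EXAMPLE_MIN_SHIFT, -1);
  	if (!example_pool)
  		return -ENOMEM;

  	/* Hand the device-memory range to the pool chunk by chunk. */
  	for (i = base; i <= top - EXAMPLE_CHUNK + 1; i += EXAMPLE_CHUNK)
  		iwch_gen_pool_add(example_pool, i, EXAMPLE_CHUNK, -1);
  	return 0;
  }

  static unsigned long example_alloc(size_t size)
  {
  	/* First-fit scan of the chunk bitmaps; 0 means no space left. */
  	return iwch_gen_pool_alloc(example_pool, size);
  }

  static void example_free(unsigned long addr, size_t size)
  {
  	/* Size must match what was allocated at addr. */
  	iwch_gen_pool_free(example_pool, addr, size);
  }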

diff --git a/kernel_patches/fixes/genalloc.patch b/kernel_patches/fixes/genalloc.patch
new file mode 100644
index 0000000..c44a98f
--- /dev/null
+++ b/kernel_patches/fixes/genalloc.patch
@@ -0,0 +1,392 @@
+Backport of the Linux 2.6.20 generic allocator.
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/hw/cxgb3/Kconfig              |    1 
+ drivers/infiniband/hw/cxgb3/Makefile             |    3 
+ drivers/infiniband/hw/cxgb3/core/cxio_hal.h      |    4 
+ drivers/infiniband/hw/cxgb3/core/cxio_resource.c |   20 +-
+ drivers/infiniband/hw/cxgb3/core/cxio_resource.h |    2 
+ drivers/infiniband/hw/cxgb3/core/genalloc.c      |  196 ++++++++++++++++++++++
+ drivers/infiniband/hw/cxgb3/core/genalloc.h      |   36 ++++
+ 7 files changed, 247 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
+index d3db264..0361a72 100644
+--- a/drivers/infiniband/hw/cxgb3/Kconfig
++++ b/drivers/infiniband/hw/cxgb3/Kconfig
+@@ -1,7 +1,6 @@
+ config INFINIBAND_CXGB3
+ 	tristate "Chelsio RDMA Driver"
+ 	depends on CHELSIO_T3 && INFINIBAND
+-	select GENERIC_ALLOCATOR
+ 	---help---
+ 	  This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
+ 	  10GbE adapters.
+diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
+index 7a89f6d..12e7a94 100644
+--- a/drivers/infiniband/hw/cxgb3/Makefile
++++ b/drivers/infiniband/hw/cxgb3/Makefile
+@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/
+ obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
+ 
+ iw_cxgb3-y :=  iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
+-	       iwch_provider.o iwch.o core/cxio_hal.o core/cxio_resource.o
++	       iwch_provider.o iwch.o core/cxio_hal.o core/cxio_resource.o \
++		core/genalloc.o
+ 
+ ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
+ EXTRA_CFLAGS += -DDEBUG -g 
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_hal.h b/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
+index e5e702d..a9e8452 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_hal.h
+@@ -104,8 +104,8 @@ struct cxio_rdev {
+ 	u32 qpnr;
+ 	u32 qpmask;
+ 	struct cxio_ucontext uctx;
+-	struct gen_pool *pbl_pool;
+-	struct gen_pool *rqt_pool;
++	struct iwch_gen_pool *pbl_pool;
++	struct iwch_gen_pool *rqt_pool;
+ };
+ 
+ static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_resource.c b/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
+index d1d8722..cecb27b 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_resource.c
+@@ -265,7 +265,7 @@ #define PBL_CHUNK 2*1024*1024 		
+ 
+ u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
+ {
+-	unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
++	unsigned long addr = iwch_gen_pool_alloc(rdev_p->pbl_pool, size);
+ 	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+ 	return (u32)addr;
+ }
+@@ -273,24 +273,24 @@ u32 cxio_hal_pblpool_alloc(struct cxio_r
+ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
+ {
+ 	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+-	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
++	iwch_gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
+ }
+ 
+ int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
+ {
+ 	unsigned long i;
+-	rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
++	rdev_p->pbl_pool = iwch_gen_pool_create(MIN_PBL_SHIFT, -1);
+ 	if (rdev_p->pbl_pool)
+ 		for (i = rdev_p->rnic_info.pbl_base;
+ 		     i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
+ 		     i += PBL_CHUNK)
+-			gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
++			iwch_gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
+ 	return rdev_p->pbl_pool ? 0 : -ENOMEM;
+ }
+ 
+ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
+ {
+-	gen_pool_destroy(rdev_p->pbl_pool);
++	iwch_gen_pool_destroy(rdev_p->pbl_pool);
+ }
+ 
+ /*
+@@ -302,7 +302,7 @@ #define RQT_CHUNK 2*1024*1024 		
+ 
+ u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
+ {
+-	unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
++	unsigned long addr = iwch_gen_pool_alloc(rdev_p->rqt_pool, size << 6);
+ 	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+ 	return (u32)addr;
+ }
+@@ -310,22 +310,22 @@ u32 cxio_hal_rqtpool_alloc(struct cxio_r
+ void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
+ {
+ 	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+-	gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
++	iwch_gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
+ }
+ 
+ int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
+ {
+ 	unsigned long i;
+-	rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
++	rdev_p->rqt_pool = iwch_gen_pool_create(MIN_RQT_SHIFT, -1);
+ 	if (rdev_p->rqt_pool)
+ 		for (i = rdev_p->rnic_info.rqt_base;
+ 		     i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
+ 		     i += RQT_CHUNK)
+-			gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
++			iwch_gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
+ 	return rdev_p->rqt_pool ? 0 : -ENOMEM;
+ }
+ 
+ void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
+ {
+-	gen_pool_destroy(rdev_p->rqt_pool);
++	iwch_gen_pool_destroy(rdev_p->rqt_pool);
+ }
+diff --git a/drivers/infiniband/hw/cxgb3/core/cxio_resource.h b/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
+index a6bbe83..06a8076 100644
+--- a/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
++++ b/drivers/infiniband/hw/cxgb3/core/cxio_resource.h
+@@ -39,7 +39,7 @@ #include <linux/slab.h>
+ #include <linux/kfifo.h>
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+-#include <linux/genalloc.h>
++#include "genalloc.h"
+ #include "cxio_hal.h"
+ 
+ extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
+diff --git a/drivers/infiniband/hw/cxgb3/core/genalloc.c b/drivers/infiniband/hw/cxgb3/core/genalloc.c
+new file mode 100644
+index 0000000..27ba8ec
+--- /dev/null
++++ b/drivers/infiniband/hw/cxgb3/core/genalloc.c
+@@ -0,0 +1,196 @@
++/*
++ * Basic general purpose allocator for managing special purpose memory
++ * not managed by the regular kmalloc/kfree interface.
++ * Uses for this includes on-device special memory, uncached memory
++ * etc.
++ *
++ * Copyright 2005 (C) Jes Sorensen <jes at trained-monkey.org>
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2.  See the file COPYING for more details.
++ */
++
++#include <linux/module.h>
++#include "genalloc.h"
++
++
++/**
++ * iwch_gen_pool_create - create a new special memory pool
++ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
++ * @nid: node id of the node the pool structure should be allocated on, or -1
++ *
++ * Create a new special memory pool that can be used to manage special purpose
++ * memory not managed by the regular kmalloc/kfree interface.
++ */
++struct iwch_gen_pool *iwch_gen_pool_create(int min_alloc_order, int nid)
++{
++	struct iwch_gen_pool *pool;
++
++	pool = kmalloc_node(sizeof(struct iwch_gen_pool), GFP_KERNEL, nid);
++	if (pool != NULL) {
++		rwlock_init(&pool->lock);
++		INIT_LIST_HEAD(&pool->chunks);
++		pool->min_alloc_order = min_alloc_order;
++	}
++	return pool;
++}
++
++/**
++ * iwch_gen_pool_add - add a new chunk of special memory to the pool
++ * @pool: pool to add new memory chunk to
++ * @addr: starting address of memory chunk to add to pool
++ * @size: size in bytes of the memory chunk to add to pool
++ * @nid: node id of the node the chunk structure and bitmap should be
++ *       allocated on, or -1
++ *
++ * Add a new chunk of special memory to the specified pool.
++ */
++int iwch_gen_pool_add(struct iwch_gen_pool *pool, unsigned long addr,
++				      size_t size, int nid)
++{
++	struct iwch_gen_pool_chunk *chunk;
++	int nbits = size >> pool->min_alloc_order;
++	int nbytes = sizeof(struct iwch_gen_pool_chunk) +
++				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
++
++	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
++	if (unlikely(chunk == NULL))
++		return -1;
++
++	memset(chunk, 0, nbytes);
++	spin_lock_init(&chunk->lock);
++	chunk->start_addr = addr;
++	chunk->end_addr = addr + size;
++
++	write_lock(&pool->lock);
++	list_add(&chunk->next_chunk, &pool->chunks);
++	write_unlock(&pool->lock);
++
++	return 0;
++}
++
++/**
++ * iwch_gen_pool_destroy - destroy a special memory pool
++ * @pool: pool to destroy
++ *
++ * Destroy the specified special memory pool. Verifies that there are no
++ * outstanding allocations.
++ */
++void iwch_gen_pool_destroy(struct iwch_gen_pool *pool)
++{
++	struct list_head *_chunk, *_next_chunk;
++	struct iwch_gen_pool_chunk *chunk;
++	int order = pool->min_alloc_order;
++	int bit, end_bit;
++
++
++	write_lock(&pool->lock);
++	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
++		chunk = list_entry(_chunk, struct iwch_gen_pool_chunk, 
++				   next_chunk);
++		list_del(&chunk->next_chunk);
++
++		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
++		bit = find_next_bit(chunk->bits, end_bit, 0);
++		BUG_ON(bit < end_bit);
++
++		kfree(chunk);
++	}
++	kfree(pool);
++	return;
++}
++
++/**
++ * iwch_gen_pool_alloc - allocate special memory from the pool
++ * @pool: pool to allocate from
++ * @size: number of bytes to allocate from the pool
++ *
++ * Allocate the requested number of bytes from the specified pool.
++ * Uses a first-fit algorithm.
++ */
++unsigned long iwch_gen_pool_alloc(struct iwch_gen_pool *pool, size_t size)
++{
++	struct list_head *_chunk;
++	struct iwch_gen_pool_chunk *chunk;
++	unsigned long addr, flags;
++	int order = pool->min_alloc_order;
++	int nbits, bit, start_bit, end_bit;
++
++	if (size == 0)
++		return 0;
++
++	nbits = (size + (1UL << order) - 1) >> order;
++
++	read_lock(&pool->lock);
++	list_for_each(_chunk, &pool->chunks) {
++		chunk = list_entry(_chunk, struct iwch_gen_pool_chunk, 
++				   next_chunk);
++
++		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
++		end_bit -= nbits + 1;
++
++		spin_lock_irqsave(&chunk->lock, flags);
++		bit = -1;
++		while (bit + 1 < end_bit) {
++			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
++			if (bit >= end_bit)
++				break;
++
++			start_bit = bit;
++			if (nbits > 1) {
++				bit = find_next_bit(chunk->bits, bit + nbits,
++							bit + 1);
++				if (bit - start_bit < nbits)
++					continue;
++			}
++
++			addr = chunk->start_addr +
++					    ((unsigned long)start_bit << order);
++			while (nbits--)
++				__set_bit(start_bit++, &chunk->bits);
++			spin_unlock_irqrestore(&chunk->lock, flags);
++			read_unlock(&pool->lock);
++			return addr;
++		}
++		spin_unlock_irqrestore(&chunk->lock, flags);
++	}
++	read_unlock(&pool->lock);
++	return 0;
++}
++
++/**
++ * iwch_gen_pool_free - free allocated special memory back to the pool
++ * @pool: pool to free to
++ * @addr: starting address of memory to free back to pool
++ * @size: size in bytes of memory to free
++ *
++ * Free previously allocated special memory back to the specified pool.
++ */
++void iwch_gen_pool_free(struct iwch_gen_pool *pool, unsigned long addr, 
++			size_t size)
++{
++	struct list_head *_chunk;
++	struct iwch_gen_pool_chunk *chunk;
++	unsigned long flags;
++	int order = pool->min_alloc_order;
++	int bit, nbits;
++
++	nbits = (size + (1UL << order) - 1) >> order;
++
++	read_lock(&pool->lock);
++	list_for_each(_chunk, &pool->chunks) {
++		chunk = list_entry(_chunk, struct iwch_gen_pool_chunk, 
++				   next_chunk);
++
++		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
++			BUG_ON(addr + size > chunk->end_addr);
++			spin_lock_irqsave(&chunk->lock, flags);
++			bit = (addr - chunk->start_addr) >> order;
++			while (nbits--)
++				__clear_bit(bit++, &chunk->bits);
++			spin_unlock_irqrestore(&chunk->lock, flags);
++			break;
++		}
++	}
++	BUG_ON(nbits > 0);
++	read_unlock(&pool->lock);
++}
+diff --git a/drivers/infiniband/hw/cxgb3/core/genalloc.h b/drivers/infiniband/hw/cxgb3/core/genalloc.h
+new file mode 100644
+index 0000000..1dc336c
+--- /dev/null
++++ b/drivers/infiniband/hw/cxgb3/core/genalloc.h
+@@ -0,0 +1,36 @@
++/*
++ * Basic general purpose allocator for managing special purpose memory
++ * not managed by the regular kmalloc/kfree interface.
++ * Uses for this includes on-device special memory, uncached memory
++ * etc.
++ *
++ * This source code is licensed under the GNU General Public License,
++ * Version 2.  See the file COPYING for more details.
++ */
++
++
++/*
++ *  General purpose special memory pool descriptor.
++ */
++struct iwch_gen_pool {
++	rwlock_t lock;
++	struct list_head chunks;	/* list of chunks in this pool */
++	int min_alloc_order;		/* minimum allocation order */
++};
++
++/*
++ *  General purpose special memory pool chunk descriptor.
++ */
++struct iwch_gen_pool_chunk {
++	spinlock_t lock;
++	struct list_head next_chunk;	/* next chunk in pool */
++	unsigned long start_addr;	/* starting address of memory chunk */
++	unsigned long end_addr;		/* ending address of memory chunk */
++	unsigned long bits[0];		/* bitmap for allocating memory chunk */
++};
++
++extern struct iwch_gen_pool *iwch_gen_pool_create(int, int);
++extern int iwch_gen_pool_add(struct iwch_gen_pool *, unsigned long, size_t, int);
++extern void iwch_gen_pool_destroy(struct iwch_gen_pool *);
++extern unsigned long iwch_gen_pool_alloc(struct iwch_gen_pool *, size_t);
++extern void iwch_gen_pool_free(struct iwch_gen_pool *, unsigned long, size_t);
diff --git a/kernel_patches/fixes/ib_wc_qpn_to_qp.patch b/kernel_patches/fixes/ib_wc_qpn_to_qp.patch
index 67f9da5..571d579 100644
--- a/kernel_patches/fixes/ib_wc_qpn_to_qp.patch
+++ b/kernel_patches/fixes/ib_wc_qpn_to_qp.patch
@@ -309,3 +309,16 @@ index 0bfa332..54cde37 100644
  	u32			src_qp;
  	int			wc_flags;
  	u16			pkey_index;
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
+index ff09509..122f7b4 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
+@@ -80,7 +80,7 @@ int iwch_poll_cq_one(struct iwch_dev *rh
+ 	ret = 1;
+ 
+ 	wc->wr_id = cookie;
+-	wc->qp_num = qhp->wq.qpid;
++	wc->qp = &qhp->ibqp;
+ 	wc->vendor_err = CQE_STATUS(cqe);
+ 
+ 	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
diff --git a/kernel_patches/fixes/t3_core_changes.patch b/kernel_patches/fixes/t3_core_changes.patch
new file mode 100644
index 0000000..c4631e7
--- /dev/null
+++ b/kernel_patches/fixes/t3_core_changes.patch
@@ -0,0 +1,202 @@
+Linux RDMA Core Changes
+
+From: Steve Wise <swise at opengridcomputing.com>
+
+Support provider-specific data in ib_uverbs_req_notify_cq().
+The Chelsio iWARP provider library needs to pass information to the
+kernel verb for re-arming the CQ.
+
+Signed-off-by: Steve Wise <swise at opengridcomputing.com>
+---
+
+ drivers/infiniband/core/uverbs_cmd.c      |    9 +++++++--
+ drivers/infiniband/hw/amso1100/c2.h       |    2 +-
+ drivers/infiniband/hw/amso1100/c2_cq.c    |    3 ++-
+ drivers/infiniband/hw/ehca/ehca_iverbs.h  |    3 ++-
+ drivers/infiniband/hw/ehca/ehca_reqs.c    |    3 ++-
+ drivers/infiniband/hw/ipath/ipath_cq.c    |    4 +++-
+ drivers/infiniband/hw/ipath/ipath_verbs.h |    3 ++-
+ drivers/infiniband/hw/mthca/mthca_cq.c    |    6 ++++--
+ drivers/infiniband/hw/mthca/mthca_dev.h   |    4 ++--
+ include/rdma/ib_verbs.h                   |    5 +++--
+ 10 files changed, 28 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 743247e..5dd1de9 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -959,6 +959,7 @@ ssize_t ib_uverbs_req_notify_cq(struct i
+ 				int out_len)
+ {
+ 	struct ib_uverbs_req_notify_cq cmd;
++	struct ib_udata		      udata;
+ 	struct ib_cq                  *cq;
+ 
+ 	if (copy_from_user(&cmd, buf, sizeof cmd))
+@@ -968,8 +969,12 @@ ssize_t ib_uverbs_req_notify_cq(struct i
+ 	if (!cq)
+ 		return -EINVAL;
+ 
+-	ib_req_notify_cq(cq, cmd.solicited_only ?
+-			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
++	INIT_UDATA(&udata, buf + sizeof cmd, 0,
++		   in_len - sizeof cmd, 0); 
++
++	cq->device->req_notify_cq(cq, cmd.solicited_only ?
++				  IB_CQ_SOLICITED : IB_CQ_NEXT_COMP,
++				  &udata);
+ 
+ 	put_cq_read(cq);
+ 
+diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
+index 04a9db5..9a76869 100644
+--- a/drivers/infiniband/hw/amso1100/c2.h
++++ b/drivers/infiniband/hw/amso1100/c2.h
+@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2
+ extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
+ extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
+ extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
++extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify, struct ib_udata *udata);
+ 
+ /* CM */
+ extern int c2_llp_connect(struct iw_cm_id *cm_id,
+diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
+index 05c9154..7ce8bca 100644
+--- a/drivers/infiniband/hw/amso1100/c2_cq.c
++++ b/drivers/infiniband/hw/amso1100/c2_cq.c
+@@ -217,7 +217,8 @@ int c2_poll_cq(struct ib_cq *ibcq, int n
+ 	return npolled;
+ }
+ 
+-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
++int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify,
++	      struct ib_udata *udata)
+ {
+ 	struct c2_mq_shared __iomem *shared;
+ 	struct c2_cq *cq;
+diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
+index 3720e30..566b30c 100644
+--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
++++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
+@@ -135,7 +135,8 @@ int ehca_poll_cq(struct ib_cq *cq, int n
+ 
+ int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
+ 
+-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
++int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify,
++		       struct ib_udata *udata);
+ 
+ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+ 			     struct ib_qp_init_attr *init_attr,
+diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
+index b46bda1..3ed6992 100644
+--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
++++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
+@@ -634,7 +634,8 @@ poll_cq_exit0:
+ 	return ret;
+ }
+ 
+-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
++int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify,
++		       struct ib_udata *udata)
+ {
+ 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ 
+diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
+index 87462e0..27ba4db 100644
+--- a/drivers/infiniband/hw/ipath/ipath_cq.c
++++ b/drivers/infiniband/hw/ipath/ipath_cq.c
+@@ -307,13 +307,15 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
+  * ipath_req_notify_cq - change the notification type for a completion queue
+  * @ibcq: the completion queue
+  * @notify: the type of notification to request
++ * @udata: user data 
+  *
+  * Returns 0 for success.
+  *
+  * This may be called from interrupt context.  Also called by
+  * ib_req_notify_cq() in the generic verbs code.
+  */
+-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
++int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify,
++			struct ib_udata *udata)
+ {
+ 	struct ipath_cq *cq = to_icq(ibcq);
+ 	unsigned long flags;
+diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
+index c0c8d5b..7db01ae 100644
+--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
++++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
+@@ -716,7 +716,8 @@ struct ib_cq *ipath_create_cq(struct ib_
+ 
+ int ipath_destroy_cq(struct ib_cq *ibcq);
+ 
+-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
++int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify,
++			struct ib_udata *udata);
+ 
+ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+ 
+diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
+index 283d50b..15cbd49 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cq.c
++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
+@@ -722,7 +722,8 @@ repoll:
+ 	return err == 0 || err == -EAGAIN ? npolled : err;
+ }
+ 
+-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
++int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify, 
++		       struct ib_udata *udata)
+ {
+ 	__be32 doorbell[2];
+ 
+@@ -739,7 +740,8 @@ int mthca_tavor_arm_cq(struct ib_cq *cq,
+ 	return 0;
+ }
+ 
+-int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
++int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify,
++		       struct ib_udata *udata)
+ {
+ 	struct mthca_cq *cq = to_mcq(ibcq);
+ 	__be32 doorbell[2];
+diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
+index fe5cecf..6b9ccf6 100644
+--- a/drivers/infiniband/hw/mthca/mthca_dev.h
++++ b/drivers/infiniband/hw/mthca/mthca_dev.h
+@@ -493,8 +493,8 @@ void mthca_unmap_eq_icm(struct mthca_dev
+ 
+ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
+ 		  struct ib_wc *entry);
+-int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
+-int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
++int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify, struct ib_udata *udata);
++int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify, struct ib_udata *udata);
+ int mthca_init_cq(struct mthca_dev *dev, int nent,
+ 		  struct mthca_ucontext *ctx, u32 pdn,
+ 		  struct mthca_cq *cq);
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 0bfa332..4dc771f 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -986,7 +986,8 @@ struct ib_device {
+ 					      struct ib_wc *wc);
+ 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
+ 	int                        (*req_notify_cq)(struct ib_cq *cq,
+-						    enum ib_cq_notify cq_notify);
++						    enum ib_cq_notify cq_notify,
++						    struct ib_udata *udata);
+ 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
+ 						      int wc_cnt);
+ 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
+@@ -1420,7 +1421,7 @@ int ib_peek_cq(struct ib_cq *cq, int wc_
+ static inline int ib_req_notify_cq(struct ib_cq *cq,
+ 				   enum ib_cq_notify cq_notify)
+ {
+-	return cq->device->req_notify_cq(cq, cq_notify);
++	return cq->device->req_notify_cq(cq, cq_notify, NULL);
+ }
+ 
+ /**
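
A note on the last hunk: in-kernel callers keep the old two-argument
ib_req_notify_cq() and the core now passes NULL for udata, so providers
that do not need library data can simply ignore the new argument.  A
provider that does want the extra bytes the userspace library appends
after struct ib_uverbs_req_notify_cq could pull them out roughly as
sketched here; example_arm_cq() and the rearm layout are illustrative
assumptions only -- the actual cxgb3 implementation is in PATCH 2/2.

  /*
   * Sketch only: consuming provider-specific re-arm data via the new
   * udata argument.  INIT_UDATA() in ib_uverbs_req_notify_cq() points
   * udata->inbuf just past the command structure, with
   * inlen = in_len - sizeof cmd.
   */
  static int example_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify,
  			    struct ib_udata *udata)
  {
  	struct {
  		__u32 cq_pidx;		/* assumed private ABI field */
  	} rearm;

  	if (udata && udata->inlen >= sizeof rearm) {
  		if (ib_copy_from_udata(&rearm, udata, sizeof rearm))
  			return -EFAULT;
  		/* ... use rearm.cq_pidx when ringing the CQ doorbell ... */
  	}

  	/* notify selects IB_CQ_SOLICITED vs. IB_CQ_NEXT_COMP as before. */
  	return 0;
  }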