[openib-general] Proposed updates for 2.6.13

Roland Dreier rolandd at cisco.com
Wed Jul 27 11:00:36 PDT 2005


Here is the set of patches I am planning on sending upstream this week
for inclusion in 2.6.13.  In addition to bug fixes, I am including
fasync support for uverbs -- it seems worth having this in 2.6.13, so
that code can rely on it in any released kernel.

Please let me know if you think there are other changes we should send
upstream, or if you think some of the changes I'm including here
should be held for 2.6.14.

 - R.


Id: cf3efcf0bebe11027caa231666d2392640c63c32
tree 941fb82a37b295d23e172eb49e55477130a82dc5
parent 6b6a93c6876ea1c530d5d3f68e3678093a27fab0
author Roland Dreier <roland at eddore.topspincom.com> 1122348605 -0700
committer Roland Dreier <roland at eddore.topspincom.com> 1122348605 -0700

Fix handling of error CQ entries on mem-free HCAs: the doorbell count is
never valid there, so we shouldn't look at it.

Signed-off-by: Roland Dreier <rolandd at cisco.com>


======== diff against 6b6a93c6876ea1c530d5d3f68e3678093a27fab0 ========
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -373,8 +373,12 @@ static int handle_error_cqe(struct mthca
 	 * If we're at the end of the WQE chain, or we've used up our
 	 * doorbell count, free the CQE.  Otherwise just update it for
 	 * the next poll operation.
+	 *
+	 * This does not apply to mem-free HCAs: they don't use the
+	 * doorbell count field, and so we should always free the CQE.
 	 */
-	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
+	if (mthca_is_memfree(dev) ||
+	    !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
 		return 0;
 
 	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
======== end ========

Id: 53877608748c50917b400d8b2be71782179260d9
tree 668a14bdfabff09e73d3317ca43ddbfc042323f8
parent cf3efcf0bebe11027caa231666d2392640c63c32
author Roland Dreier <roland at eddore.topspincom.com> 1122349011 -0700
committer Roland Dreier <roland at eddore.topspincom.com> 1122349011 -0700

From: Gleb Natapov <glebn at voltaire.com>

Add support for O_ASYNC notifications on uverbs completion and asynchronous
event file descriptors.

Signed-off-by: Gleb Natapov <glebn at voltaire.com>
Signed-off-by: Roland Dreier <rolandd at cisco.com>


======== diff against cf3efcf0bebe11027caa231666d2392640c63c32 ========
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -61,6 +61,7 @@ struct ib_uverbs_event_file {
 	int					fd;
 	int					is_async;
 	wait_queue_head_t			poll_wait;
+	struct fasync_struct		       *async_queue;
 	struct list_head			event_list;
 };
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -257,11 +257,19 @@ static void ib_uverbs_event_release(stru
 	spin_unlock_irq(&file->lock);
 }
 
+static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
+{
+	struct ib_uverbs_event_file *file = filp->private_data;
+
+	return fasync_helper(fd, filp, on, &file->async_queue);
+}
+
 static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;
 
 	ib_uverbs_event_release(file);
+	ib_uverbs_event_fasync(-1, filp, 0);
 	kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
 
 	return 0;
@@ -276,7 +284,8 @@ static struct file_operations uverbs_eve
 	 */
 	.read 	 = ib_uverbs_event_read,
 	.poll    = ib_uverbs_event_poll,
-	.release = ib_uverbs_event_close
+	.release = ib_uverbs_event_close,
+	.fasync  = ib_uverbs_event_fasync
 };
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -296,6 +305,7 @@ void ib_uverbs_comp_handler(struct ib_cq
 	spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 
 	wake_up_interruptible(&file->comp_file[0].poll_wait);
+	kill_fasync(&file->comp_file[0].async_queue, SIGIO, POLL_IN);
 }
 
 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
@@ -316,6 +326,7 @@ static void ib_uverbs_async_handler(stru
 	spin_unlock_irqrestore(&file->async_file.lock, flags);
 
 	wake_up_interruptible(&file->async_file.poll_wait);
+	kill_fasync(&file->async_file.async_queue, SIGIO, POLL_IN);
 }
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
@@ -350,6 +361,7 @@ static int ib_uverbs_event_init(struct i
 	INIT_LIST_HEAD(&file->event_list);
 	init_waitqueue_head(&file->poll_wait);
 	file->uverbs_file = uverbs_file;
+	file->async_queue = NULL;
 
 	file->fd = get_unused_fd();
 	if (file->fd < 0)
======== end ========
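
As an aside, here is roughly what the userspace side of this looks like
once fasync is hooked up -- just an illustrative sketch, not part of the
patch, with "event_fd" standing for whichever uverbs event file
descriptor (completion or async) the process wants SIGIO on:

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t event_ready;

static void sigio_handler(int sig)
{
	/* An event is pending; read it outside the handler. */
	event_ready = 1;
}

/* Request SIGIO delivery on event_fd.  Returns 0 on success. */
static int enable_async(int event_fd)
{
	struct sigaction sa = { .sa_handler = sigio_handler };

	if (sigaction(SIGIO, &sa, NULL) < 0)
		return -1;

	/* Deliver the signal to this process... */
	if (fcntl(event_fd, F_SETOWN, getpid()) < 0)
		return -1;

	/* ...and set O_ASYNC, which lands in the new fasync hook. */
	return fcntl(event_fd, F_SETFL,
		     fcntl(event_fd, F_GETFL) | O_ASYNC) < 0 ? -1 : 0;
}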

Id: a42b5922ba01c304e81ecb94996b4f5b5def4c5f
tree 86c8e7b81466309f4d632587f477454754247016
parent 53877608748c50917b400d8b2be71782179260d9
author Roland Dreier <roland at eddore.topspincom.com> 1122349418 -0700
committer Roland Dreier <roland at eddore.topspincom.com> 1122349418 -0700

Fix tx_head/tx_tail comparisons so that they remain correct across
wraparound.

Signed-off-by: Roland Dreier <rolandd at cisco.com>


======== diff against 53877608748c50917b400d8b2be71782179260d9 ========
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -81,7 +81,7 @@ void ipoib_free_ah(struct kref *kref)
 
 	unsigned long flags;
 
-	if (ah->last_send <= priv->tx_tail) {
+	if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
 		ib_destroy_ah(ah->ah);
 		kfree(ah);
@@ -355,7 +355,7 @@ static void __ipoib_reap_ah(struct net_d
 
 	spin_lock_irq(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
-		if (ah->last_send <= priv->tx_tail) {
+		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
 			list_add_tail(&ah->list, &remove_list);
 		}
@@ -486,7 +486,7 @@ int ipoib_ib_dev_stop(struct net_device 
 			 * assume the HW is wedged and just free up
 			 * all our pending work requests.
 			 */
-			while (priv->tx_tail < priv->tx_head) {
+			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 				tx_req = &priv->tx_ring[priv->tx_tail &
 							(IPOIB_TX_RING_SIZE - 1)];
 				dma_unmap_single(priv->ca->dma_device,
======== end ========
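
In case the casts look odd: tx_head, tx_tail and last_send are
free-running unsigned counters, so a plain "<=" gives the wrong answer
once a counter wraps past UINT_MAX.  Treating the difference as a
signed value recovers the right ordering as long as the two counters
stay within 2^31 of each other.  A standalone illustration (hypothetical
test code, not part of the patch; it relies on the usual two's-complement
conversion, as the kernel does):

#include <assert.h>
#include <limits.h>

/*
 * Nonzero if free-running counter a is at or ahead of b; stays correct
 * across wraparound while the two are within INT_MAX of each other.
 * (Same idea as the signed comparisons in the patch above.)
 */
static int counter_geq(unsigned int a, unsigned int b)
{
	return (int) (a - b) >= 0;
}

int main(void)
{
	unsigned int last_send = UINT_MAX - 1;	/* about to wrap */
	unsigned int tx_tail   = 2;		/* wrapped; 4 sends later */

	/* The plain comparison wrongly claims the send is unfinished. */
	assert(!(last_send <= tx_tail));

	/* The signed-difference test correctly sees tx_tail ahead. */
	assert(counter_geq(tx_tail, last_send));

	return 0;
}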

Id: 7b0c5a421d9fe71c6d72d1562bb9be8d34680cd5
tree 1f825143f41fd79eee4d246885d631aed840bf0e
parent a42b5922ba01c304e81ecb94996b4f5b5def4c5f
author Roland Dreier <roland at eddore.topspincom.com> 1122349504 -0700
committer Roland Dreier <roland at eddore.topspincom.com> 1122349504 -0700

From: Michael S. Tsirkin <mst at mellanox.co.il>

Use io_remap_pfn_range to remap I/O pages (remap_pfn_range is only for
ordinary memory; the two differ on some architectures, such as sparc64).

Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd at cisco.com>


======== diff against a42b5922ba01c304e81ecb94996b4f5b5def4c5f ========
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -349,9 +349,9 @@ static int mthca_mmap_uar(struct ib_ucon
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (remap_pfn_range(vma, vma->vm_start,
-			    to_mucontext(context)->uar.pfn,
-			    PAGE_SIZE, vma->vm_page_prot))
+	if (io_remap_pfn_range(vma, vma->vm_start,
+			       to_mucontext(context)->uar.pfn,
+			       PAGE_SIZE, vma->vm_page_prot))
 		return -EAGAIN;
 
 	return 0;
======== end ========


