[openib-general] [PATCH] new kernel side of stale CQ event handling

Roland Dreier rolandd at cisco.com
Tue Sep 6 14:29:26 PDT 2005


This is completely analogous to the async event change we just made.
I also took the opportunity to clean up some of the code by
consolidating struct ib_uverbs_async_event and struct
ib_uverbs_comp_event into a single struct ib_uverbs_event.
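
For reference, here is a rough sketch of how the userspace side is
expected to use the two counters returned in the new
ib_uverbs_destroy_cq_resp.  This is illustrative only (the struct and
function names below are made up and are not part of the patch): the
library keeps per-CQ counts of the completion and async events it has
handed to the application, and on destroy it waits until those counts
catch up with what the kernel reports before freeing its CQ state, so
a later event read can never land on a stale CQ.

/* Sketch only -- assumed userspace-side usage, not part of this patch. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Mirrors the layout this patch adds to struct ib_uverbs_destroy_cq_resp. */
struct example_destroy_cq_resp {
	uint32_t comp_events_reported;
	uint32_t async_events_reported;
};

/* Hypothetical per-CQ state kept by a userspace verbs library. */
struct example_cq {
	pthread_mutex_t mutex;
	pthread_cond_t  cond;
	uint32_t        comp_events_completed;	/* comp events acked by the app */
	uint32_t        async_events_completed;	/* async events acked by the app */
};

/*
 * Called after the destroy CQ command has returned and filled in resp:
 * wait until the application has consumed every event the kernel says
 * it queued for this CQ, then free the userspace CQ state.
 */
static void example_wait_and_free_cq(struct example_cq *cq,
				     const struct example_destroy_cq_resp *resp)
{
	pthread_mutex_lock(&cq->mutex);
	while (cq->comp_events_completed  != resp->comp_events_reported ||
	       cq->async_events_completed != resp->async_events_reported)
		pthread_cond_wait(&cq->cond, &cq->mutex);
	pthread_mutex_unlock(&cq->mutex);

	free(cq);
}

The event reader in such a library would bump the matching
*_events_completed counter and signal cq->cond each time the
application acks an event for that CQ.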

 - R.

--- infiniband/include/rdma/ib_user_verbs.h	(revision 3324)
+++ infiniband/include/rdma/ib_user_verbs.h	(working copy)
@@ -297,7 +297,8 @@ struct ib_uverbs_destroy_cq {
 };
 
 struct ib_uverbs_destroy_cq_resp {
-	__u32 events_reported;
+	__u32 comp_events_reported;
+	__u32 async_events_reported;
 };
 
 struct ib_uverbs_create_qp {
--- infiniband/core/uverbs_main.c	(revision 3324)
+++ infiniband/core/uverbs_main.c	(working copy)
@@ -128,7 +128,7 @@ static int ib_dealloc_ucontext(struct ib
 		idr_remove(&ib_uverbs_cq_idr, uobj->id);
 		ib_destroy_cq(cq);
 		list_del(&uobj->list);
-		kfree(container_of(uobj, struct ib_uevent_object, uobject));
+		kfree(container_of(uobj, struct ib_ucq_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
@@ -182,9 +182,7 @@ static ssize_t ib_uverbs_event_read(stru
 				    size_t count, loff_t *pos)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;
-	struct ib_uverbs_async_event *async_evt = NULL;
-	u32 *counter = NULL;
-	void *event;
+	struct ib_uverbs_event *event;
 	int eventsz;
 	int ret = 0;
 
@@ -209,28 +207,22 @@ static ssize_t ib_uverbs_event_read(stru
 		return -ENODEV;
 	}
 
-	if (file->is_async) {
-		async_evt = list_entry(file->event_list.next,
-				       struct ib_uverbs_async_event, list);
-		event     = async_evt;
-		eventsz   = sizeof *async_evt;
-		counter   = async_evt->counter;
+	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
 
-		if (counter)
-			++*counter;
-	} else {
-		event   = list_entry(file->event_list.next,
-				     struct ib_uverbs_comp_event, list);
+	if (file->is_async)
+		eventsz = sizeof (struct ib_uverbs_async_event_desc);
+	else
 		eventsz = sizeof (struct ib_uverbs_comp_event_desc);
-	}
 
 	if (eventsz > count) {
 		ret   = -EINVAL;
 		event = NULL;
 	} else {
 		list_del(file->event_list.next);
-		if (counter)
-			list_del(&async_evt->obj_list);
+		if (event->counter) {
+			++(*event->counter);
+			list_del(&event->obj_list);
+		}
 	}
 
 	spin_unlock_irq(&file->lock);
@@ -267,16 +259,13 @@ static unsigned int ib_uverbs_event_poll
 
 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
 {
-	struct list_head *entry, *tmp;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
 	if (file->fd != -1) {
 		file->fd = -1;
-		list_for_each_safe(entry, tmp, &file->event_list)
-			if (file->is_async)
-				kfree(list_entry(entry, struct ib_uverbs_async_event, list));
-			else
-				kfree(list_entry(entry, struct ib_uverbs_comp_event, list));
+		list_for_each_entry_safe(entry, tmp, &file->event_list, list)
+			kfree(entry);
 	}
 	spin_unlock_irq(&file->lock);
 }
@@ -314,18 +303,23 @@ static struct file_operations uverbs_eve
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct ib_uverbs_file       *file = cq_context;
-	struct ib_uverbs_comp_event *entry;
-	unsigned long                flags;
+	struct ib_uverbs_file  *file = cq_context;
+	struct ib_ucq_object *uobj;
+	struct ib_uverbs_event *entry;
+	unsigned long           flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.cq_handle = cq->uobject->user_handle;
+	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+
+	entry->desc.comp.cq_handle = cq->uobject->user_handle;
+	entry->counter		   = &uobj->comp_events_reported;
 
 	spin_lock_irqsave(&file->comp_file[0].lock, flags);
 	list_add_tail(&entry->list, &file->comp_file[0].event_list);
+	list_add_tail(&entry->obj_list, &uobj->comp_list);
 	spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 
 	wake_up_interruptible(&file->comp_file[0].poll_wait);
@@ -337,16 +331,16 @@ static void ib_uverbs_async_handler(stru
 				    struct list_head *obj_list,
 				    u32 *counter)
 {
-	struct ib_uverbs_async_event *entry;
+	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.element    = element;
-	entry->desc.event_type = event;
-	entry->counter         = counter;
+	entry->desc.async.element    = element;
+	entry->desc.async.event_type = event;
+	entry->counter               = counter;
 
 	spin_lock_irqsave(&file->async_file.lock, flags);
 	list_add_tail(&entry->list, &file->async_file.event_list);
@@ -360,14 +354,14 @@ static void ib_uverbs_async_handler(stru
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_uevent_object *uobj;
+	struct ib_ucq_object *uobj;
 
 	uobj = container_of(event->element.cq->uobject,
-			    struct ib_uevent_object, uobject);
+			    struct ib_ucq_object, uobject);
 
 	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
-				event->event, &uobj->event_list,
-				&uobj->events_reported);
+				event->event, &uobj->async_list,
+				&uobj->async_events_reported);
 				
 }
 
--- infiniband/core/uverbs.h	(revision 3324)
+++ infiniband/core/uverbs.h	(working copy)
@@ -76,24 +76,30 @@ struct ib_uverbs_file {
 	struct ib_uverbs_event_file	        comp_file[1];
 };
 
-struct ib_uverbs_async_event {
-	struct ib_uverbs_async_event_desc	desc;
+struct ib_uverbs_event {
+	union {
+		struct ib_uverbs_async_event_desc	async;
+		struct ib_uverbs_comp_event_desc	comp;
+	}					desc;
 	struct list_head			list;
 	struct list_head			obj_list;
 	u32				       *counter;
 };
 
-struct ib_uverbs_comp_event {
-	struct ib_uverbs_comp_event_desc	desc;
-	struct list_head			list;
-};
-
 struct ib_uevent_object {
 	struct ib_uobject	uobject;
 	struct list_head	event_list;
 	u32			events_reported;
 };
 
+struct ib_ucq_object {
+	struct ib_uobject	uobject;
+	struct list_head	comp_list;
+	struct list_head	async_list;
+	u32			comp_events_reported;
+	u32			async_events_reported;
+};
+
 extern struct semaphore ib_uverbs_idr_mutex;
 extern struct idr ib_uverbs_pd_idr;
 extern struct idr ib_uverbs_mr_idr;
--- infiniband/core/uverbs_cmd.c	(revision 3324)
+++ infiniband/core/uverbs_cmd.c	(working copy)
@@ -590,7 +590,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uv
 	struct ib_uverbs_create_cq      cmd;
 	struct ib_uverbs_create_cq_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uevent_object        *uobj;
+	struct ib_ucq_object           *uobj;
 	struct ib_cq                   *cq;
 	int                             ret;
 
@@ -611,10 +611,12 @@ ssize_t ib_uverbs_create_cq(struct ib_uv
 	if (!uobj)
 		return -ENOMEM;
 
-	uobj->uobject.user_handle = cmd.user_handle;
-	uobj->uobject.context     = file->ucontext;
-	uobj->events_reported     = 0;
-	INIT_LIST_HEAD(&uobj->event_list);
+	uobj->uobject.user_handle   = cmd.user_handle;
+	uobj->uobject.context       = file->ucontext;
+	uobj->comp_events_reported  = 0;
+	uobj->async_events_reported = 0;
+	INIT_LIST_HEAD(&uobj->comp_list);
+	INIT_LIST_HEAD(&uobj->async_list);
 
 	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
 					     file->ucontext, &udata);
@@ -685,8 +687,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_u
 	struct ib_uverbs_destroy_cq      cmd;
 	struct ib_uverbs_destroy_cq_resp resp;
 	struct ib_cq               	*cq;
-	struct ib_uevent_object        	*uobj;
-	struct ib_uverbs_async_event	*evt, *tmp;
+	struct ib_ucq_object        	*uobj;
+	struct ib_uverbs_event		*evt, *tmp;
+	u64				 user_handle;
 	int                        	 ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -700,7 +703,8 @@ ssize_t ib_uverbs_destroy_cq(struct ib_u
 	if (!cq || cq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = container_of(cq->uobject, struct ib_uevent_object, uobject);
+	user_handle = cq->uobject->user_handle;
+	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	ret = ib_destroy_cq(cq);
 	if (ret)
@@ -712,14 +716,22 @@ ssize_t ib_uverbs_destroy_cq(struct ib_u
 	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->comp_file[0].lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->comp_file[0].lock);
+
 	spin_lock_irq(&file->async_file.lock);
-	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
 		list_del(&evt->list);
 		kfree(evt);
 	}
 	spin_unlock_irq(&file->async_file.lock);
 
-	resp.events_reported = uobj->events_reported;
+	resp.comp_events_reported  = uobj->comp_events_reported;
+	resp.async_events_reported = uobj->async_events_reported;
 
 	kfree(uobj);
 
@@ -955,7 +967,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_u
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_qp               	*qp;
 	struct ib_uevent_object        	*uobj;
-	struct ib_uverbs_async_event	*evt, *tmp;
+	struct ib_uverbs_event		*evt, *tmp;
 	int                        	 ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1193,7 +1205,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_
 	struct ib_uverbs_destroy_srq_resp resp;
 	struct ib_srq               	 *srq;
 	struct ib_uevent_object        	 *uobj;
-	struct ib_uverbs_async_event	*evt, *tmp;
+	struct ib_uverbs_event		 *evt, *tmp;
 	int                         	  ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))


