[openib-general] Re: [RFC] [PATCH 1/2] multicast support for multiple users

Hal Rosenstock halr at voltaire.com
Thu Apr 6 13:38:14 PDT 2006


On Thu, 2006-04-06 at 14:13, Sean Hefty wrote:
> Add kernel support that tracks joining and leaving multicast groups.
> The SA tracks join/leave operations on a per port basis.  In order
> to support multiple users of the same multicast group, we need to
> track join / leave requests locally.

On initial read, this looks pretty good. Aside from the switch port 0
comment/issue, I have 2 comments/questions.

1. My main initial comment is that I think cmp_rec needs to be more
complicated than the straight equality matching which is there now. The
selectors include things like greater than, less than, and largest
available, in addition to equal to, which is all that is supported
there now. I'm not sure whether any of this is used right now, so it
may not be an issue for IPoIB.
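For instance, for the MTU field I'd expect the comparison to look
something like the sketch below (untested, and the helper name is mine;
the selector encoding follows the IBA MCMemberRecord definition: 0 =
greater than, 1 = less than, 2 = exactly, 3 = largest available):

	static int check_selector(ib_sa_comp_mask comp_mask,
				  ib_sa_comp_mask selector_mask,
				  ib_sa_comp_mask value_mask,
				  u8 selector, u8 src_value, u8 dst_value)
	{
		int err;

		/* Only compare fields the requester actually specified. */
		if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
			return 0;

		switch (selector) {
		case 0:	/* greater than */
			err = (src_value <= dst_value);
			break;
		case 1:	/* less than */
			err = (src_value >= dst_value);
			break;
		case 3:	/* largest available - whatever the group has works */
			err = 0;
			break;
		case 2:	/* exactly */
		default:
			err = (src_value != dst_value);
			break;
		}
		return err ? -EINVAL : 0;
	}

cmp_rec could then do something like this for each selector/value pair
(MTU, rate, packet life time):

	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;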

2. The other comment is that I didn't yet follow how multiple joins
with different JoinStates are handled. I can see there are different
slots in the group, but I didn't see whether all the joins go out on
the wire (one per JoinState) or whether there is some
"promotion"/"demotion" of these.
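To make sure I'm reading adjust_membership and get_leave_state right:
it looks like members[i] counts the local users of JoinState bit i (bit
0 = full member, bit 1 = non-member, bit 2 = send-only non-member), and
the group's on-the-wire JoinState is effectively the OR of the bits
with nonzero counts. A standalone toy of that accounting (illustration
only, not patch code):

	#include <stdio.h>

	/* members[i] counts local users of JoinState bit i. */
	static int members[3];

	static void join(unsigned join_state)
	{
		int i;

		for (i = 0; i < 3; i++)
			if (join_state & (1u << i))
				members[i]++;
	}

	static unsigned wire_join_state(void)
	{
		unsigned state = 0;
		int i;

		for (i = 0; i < 3; i++)
			if (members[i])
				state |= 1u << i;
		return state;
	}

	int main(void)
	{
		join(0x1);	/* user 1: full member */
		join(0x4);	/* user 2: send-only non-member */
		join(0x1);	/* user 3: full member again */
		printf("JoinState: 0x%x\n", wire_join_state()); /* 0x5 */
		return 0;
	}

If that's right, is the intent that a join only goes out on the wire
when the requested bits are not already a subset of
group->rec.join_state, and a leave only for bits whose count drops to
zero?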
 
I will also look at this some more; it needs at least a second read, as
I didn't follow everything sufficiently yet.

-- Hal

> Signed-off-by: Sean Hefty <sean.hefty at intel.com>
> 
> ---
> 
> This patch depends on the sa query patch that adds retries to that API.
> I spent considerable time (and a couple of rewrites of the code) trying to
> ensure that all race conditions were handled, and in a way that was as
> simple as possible.  Some additional review of the code that looked for
> race conditions would be appreciated.
> 
> Also note that this code has the bug that Michael pointed out:
> http://openib.org/pipermail/openib-general/2006-April/019643.html
> I believe that this bug should be fixed, once we agree on a solution, but
> I don't think that the bug is likely enough to occur that it should delay
> the check-in.
> 
> 
> Index: include/rdma/ib_multicast.h
> ===================================================================
> --- include/rdma/ib_multicast.h	(revision 0)
> +++ include/rdma/ib_multicast.h	(revision 0)
> @@ -0,0 +1,85 @@
> +/*
> + * Copyright (c) 2006 Intel Corporation.  All rights reserved.
> + *
> + * This software is available to you under a choice of one of two
> + * licenses.  You may choose to be licensed under the terms of the GNU
> + * General Public License (GPL) Version 2, available from the file
> + * COPYING in the main directory of this source tree, or the
> + * OpenIB.org BSD license below:
> + *
> + *     Redistribution and use in source and binary forms, with or
> + *     without modification, are permitted provided that the following
> + *     conditions are met:
> + *
> + *      - Redistributions of source code must retain the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer.
> + *
> + *      - Redistributions in binary form must reproduce the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer in the documentation and/or other materials
> + *        provided with the distribution.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */
> +
> +#ifndef IB_MULTICAST_H
> +#define IB_MULTICAST_H
> +
> +#include <rdma/ib_sa.h>
> +
> +struct ib_multicast {
> +	struct ib_sa_mcmember_rec rec;
> +	ib_sa_comp_mask		comp_mask;
> +	int			(*callback)(int status,
> +					    struct ib_multicast *multicast);
> +	void			*context;
> +};
> +
> +/**
> + * ib_join_multicast - Initiates a join request to the specified multicast
> + *   group.
> + * @device: Device associated with the multicast group.
> + * @port_num: Port on the specified device to associate with the multicast
> + *   group.
> + * @rec: SA multicast member record specifying group attributes.
> + * @comp_mask: Component mask indicating which group attributes of %rec are
> + *   valid.
> + * @gfp_mask: GFP mask for memory allocations.
> + * @callback: User callback invoked once the join operation completes.
> + * @context: User specified context stored with the ib_multicast structure.
> + *
> + * This call initiates a multicast join request with the SA for the specified
> + * multicast group.  If the join operation is started successfully, it returns
> + * an ib_multicast structure that is used to track the multicast operation.
> + * Users must free this structure by calling ib_free_multicast, even if the
> + * join operation later fails (indicated by a non-zero callback status).
> + */
> +struct ib_multicast *ib_join_multicast(struct ib_device *device, u8 port_num,
> +				       struct ib_sa_mcmember_rec *rec,
> +				       ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
> +				       int (*callback)(int status,
> +						       struct ib_multicast
> +							      *multicast),
> +				       void *context);
> +
> +/**
> + * ib_free_multicast - Frees the multicast tracking structure, and releases
> + *    any reference on the multicast group.
> + * @multicast: Multicast tracking structure allocated by ib_join_multicast.
> + *
> + * This call blocks until the multicast tracking structure is destroyed.  It
> + * may not be called from within the multicast callback; however, returning a non-
> + * zero value from the callback will result in destroying the multicast
> + * tracking structure.
> + */
> +void ib_free_multicast(struct ib_multicast *multicast);
> +
> +#endif /* IB_MULTICAST_H */
> Index: core/multicast.c
> ===================================================================
> --- core/multicast.c	(revision 0)
> +++ core/multicast.c	(revision 0)
> @@ -0,0 +1,659 @@
> +/*
> + * Copyright (c) 2006 Intel Corporation.  All rights reserved.
> + *
> + * This software is available to you under a choice of one of two
> + * licenses.  You may choose to be licensed under the terms of the GNU
> + * General Public License (GPL) Version 2, available from the file
> + * COPYING in the main directory of this source tree, or the
> + * OpenIB.org BSD license below:
> + *
> + *     Redistribution and use in source and binary forms, with or
> + *     without modification, are permitted provided that the following
> + *     conditions are met:
> + *
> + *      - Redistributions of source code must retain the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer.
> + *
> + *      - Redistributions in binary form must reproduce the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer in the documentation and/or other materials
> + *        provided with the distribution.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */
> +
> +#include <linux/dma-mapping.h>
> +#include <linux/err.h>
> +#include <linux/interrupt.h>
> +#include <linux/rbtree.h>
> +#include <linux/pci.h>
> +#include <linux/bitops.h>
> +
> +#include <rdma/ib_multicast.h>
> +
> +MODULE_AUTHOR("Sean Hefty");
> +MODULE_DESCRIPTION("InfiniBand multicast membership handling");
> +MODULE_LICENSE("Dual BSD/GPL");
> +
> +static int retry_timer = 5000; /* 5 sec */
> +module_param(retry_timer, int, 0444);
> +MODULE_PARM_DESC(retry_timer, "Time in ms between retried requests.");
> +
> +static int retries = 3;
> +module_param(retries, int, 0444);
> +MODULE_PARM_DESC(retries, "Number of times to retry a request.");
> +
> +static void mcast_add_one(struct ib_device *device);
> +static void mcast_remove_one(struct ib_device *device);
> +
> +static struct ib_client mcast_client = {
> +	.name   = "ib_multicast",
> +	.add    = mcast_add_one,
> +	.remove = mcast_remove_one
> +};
> +
> +static struct workqueue_struct	*mcast_wq;
> +
> +struct mcast_device;
> +
> +struct mcast_port {
> +	struct mcast_device	*dev;
> +	spinlock_t		lock;
> +	struct rb_root		table;
> +	atomic_t		refcount;
> +	wait_queue_head_t	wait;
> +	u8			port_num;
> +};
> +
> +struct mcast_device {
> +	struct ib_device	*device;
> +	struct mcast_port	port[0];
> +};
> +
> +enum mcast_state {
> +	MCAST_IDLE,
> +	MCAST_JOINING,
> +	MCAST_MEMBER,
> +	MCAST_BUSY,
> +};
> +
> +struct mcast_member;
> +
> +struct mcast_group {
> +	struct ib_sa_mcmember_rec rec;
> +	struct rb_node		node;
> +	struct mcast_port	*port;
> +	spinlock_t		lock;
> +	struct work_struct	work;
> +	struct list_head	pending_list;
> +	struct mcast_member	*last_join;
> +	int			members[3];
> +	atomic_t		refcount;
> +	enum mcast_state	state;
> +	struct ib_sa_query	*query;
> +	int			query_id;
> +};
> +
> +struct mcast_member {
> +	struct ib_multicast	multicast;
> +	struct mcast_group	*group;
> +	struct list_head	list;
> +	enum mcast_state	state;
> +	atomic_t		refcount;
> +	wait_queue_head_t	wait;
> +};
> +
> +static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
> +			 void *context);
> +static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
> +			  void *context);
> +
> +static struct mcast_group *mcast_find(struct mcast_port *port,
> +				      union ib_gid *mgid)
> +{
> +	struct rb_node *node = port->table.rb_node;
> +	struct mcast_group *group;
> +	int ret;
> +
> +	while (node) {
> +		group = rb_entry(node, struct mcast_group, node);
> +		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
> +		if (!ret)
> +			return group;
> +
> +		if (ret < 0)
> +			node = node->rb_left;
> +		else
> +			node = node->rb_right;
> +	}
> +	return NULL;
> +}
> +
> +static struct mcast_group *mcast_insert(struct mcast_port *port,
> +					struct mcast_group *group)
> +{
> +	struct rb_node **link = &port->table.rb_node;
> +	struct rb_node *parent = NULL;
> +	struct mcast_group *cur_group;
> +	int ret;
> +
> +	while (*link) {
> +		parent = *link;
> +		cur_group = rb_entry(parent, struct mcast_group, node);
> +
> +		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
> +			     sizeof group->rec.mgid);
> +		if (ret < 0)
> +			link = &(*link)->rb_left;
> +		else if (ret > 0)
> +			link = &(*link)->rb_right;
> +		else
> +			return cur_group;
> +	}
> +	rb_link_node(&group->node, parent, link);
> +	rb_insert_color(&group->node, &port->table);
> +	return NULL;
> +}
> +
> +static void deref_port(struct mcast_port *port)
> +{
> +	if (atomic_dec_and_test(&port->refcount))
> +		wake_up(&port->wait);
> +}
> +
> +static void release_group(struct mcast_group *group)
> +{
> +	struct mcast_port *port = group->port;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&port->lock, flags);
> +	if (atomic_dec_and_test(&group->refcount)) {
> +		rb_erase(&group->node, &port->table);
> +		spin_unlock_irqrestore(&port->lock, flags);
> +		kfree(group);
> +		deref_port(port);
> +	} else
> +		spin_unlock_irqrestore(&port->lock, flags);
> +}
> +
> +static void deref_member(struct mcast_member *member)
> +{
> +	if (atomic_dec_and_test(&member->refcount))
> +		wake_up(&member->wait);
> +}
> +
> +static void queue_join(struct mcast_member *member)
> +{
> +	struct mcast_group *group = member->group;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&group->lock, flags);
> +	list_add(&member->list, &group->pending_list);
> +	if (group->state == MCAST_IDLE) {
> +		group->state = MCAST_BUSY;
> +		spin_unlock_irqrestore(&group->lock, flags);
> +		atomic_inc(&group->refcount);
> +		queue_work(mcast_wq, &group->work);
> +	} else
> +		spin_unlock_irqrestore(&group->lock, flags);
> +}
> +
> +static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
> +{
> +	int i;
> +
> +	for (i = 0; i < 3; i++, join_state >>= 1)
> +		if (join_state & 0x1)
> +			group->members[i] += inc;
> +}
> +
> +static u8 get_leave_state(struct mcast_group *group)
> +{
> +	u8 leave_state = 0;
> +	int i;
> +
> +	for (i = 0; i < 3; i++)
> +		if (!group->members[i])
> +			leave_state |= (0x1 << i);
> +
> +	return leave_state & group->rec.join_state;
> +}
> +
> +static int cmp_rec(struct ib_sa_mcmember_rec *src,
> +		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
> +{
> +	/* MGID must already match */
> +
> +	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
> +	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_MTU_SELECTOR &&
> +	    src->mtu_selector != dst->mtu_selector)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_MTU && src->mtu != dst->mtu)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
> +	    src->traffic_class != dst->traffic_class)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_RATE_SELECTOR &&
> +	    src->rate_selector != dst->rate_selector)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_RATE && src->rate != dst->rate)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR &&
> +	    src->packet_life_time_selector != dst->packet_life_time_selector)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME &&
> +	    src->packet_life_time != dst->packet_life_time)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
> +	    src->flow_label != dst->flow_label)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
> +	    src->hop_limit != dst->hop_limit)
> +		return -EINVAL;
> +	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
> +		return -EINVAL;
> +
> +	/* join_state checked separately, proxy_join ignored */
> +
> +	return 0;
> +}
> +
> +static int send_join(struct mcast_group *group, struct mcast_member *member)
> +{
> +	struct mcast_port *port = group->port;
> +	int ret;
> +
> +	/* Track the in-flight join; process_join_error checks this. */
> +	group->last_join = member;
> +	ret = ib_sa_mcmember_rec_set(port->dev->device, port->port_num,
> +				     &member->multicast.rec,
> +				     member->multicast.comp_mask,
> +				     retry_timer, retries, GFP_KERNEL,
> +				     join_handler, group, &group->query);
> +	if (ret > 0) {
> +		group->query_id = ret;
> +		ret = 0;
> +	}
> +	return ret;
> +}
> +
> +static int send_leave(struct mcast_group *group, u8 leave_state)
> +{
> +	struct mcast_port *port = group->port;
> +	struct ib_sa_mcmember_rec rec;
> +	int ret;
> +
> +	rec = group->rec;
> +	rec.join_state = leave_state;
> +
> +	ret = ib_sa_mcmember_rec_delete(port->dev->device, port->port_num, &rec,
> +					IB_SA_MCMEMBER_REC_MGID     |
> +					IB_SA_MCMEMBER_REC_PORT_GID |
> +					IB_SA_MCMEMBER_REC_JOIN_STATE,
> +					retry_timer, retries, GFP_KERNEL,
> +					leave_handler, group, &group->query);
> +	if (ret > 0) {
> +		group->query_id = ret;
> +		ret = 0;
> +	}
> +	return ret;
> +}
> +
> +static void join_group(struct mcast_group *group, struct mcast_member *member,
> +		       u8 join_state)
> +{
> +	adjust_membership(group, join_state, 1);
> +	group->rec.join_state |= join_state;
> +	member->multicast.rec = group->rec;
> +	member->multicast.rec.join_state = join_state;
> +}
> +
> +static int fail_join(struct mcast_group *group, struct mcast_member *member,
> +		     int status)
> +{
> +	spin_lock_irq(&group->lock);
> +	list_del_init(&member->list);
> +	spin_unlock_irq(&group->lock);
> +	return member->multicast.callback(status, &member->multicast);
> +}
> +
> +static void mcast_work_handler(void *data)
> +{
> +	struct mcast_group *group = data;
> +	struct mcast_member *member;
> +	struct ib_multicast *multicast;
> +	int status, ret;
> +	u8 join_state;
> +
> +retest:
> +	spin_lock_irq(&group->lock);
> +	while (!list_empty(&group->pending_list)) {
> +		member = list_entry(group->pending_list.next,
> +				    struct mcast_member, list);
> +		multicast = &member->multicast;
> +		join_state = multicast->rec.join_state;
> +		atomic_inc(&member->refcount);
> +
> +		if (join_state == (group->rec.join_state & join_state)) {
> +			status = cmp_rec(&group->rec, &multicast->rec,
> +					 multicast->comp_mask);
> +			if (!status)
> +				join_group(group, member, join_state);
> +
> +			list_del_init(&member->list);
> +			spin_unlock_irq(&group->lock);
> +			ret = multicast->callback(status, multicast);
> +		} else {
> +			spin_unlock_irq(&group->lock);
> +			status = send_join(group, member);
> +			if (!status) {
> +				deref_member(member);
> +				return;
> +			}
> +			ret = fail_join(group, member, status);
> +		}
> +
> +		deref_member(member);
> +		if (ret)
> +			ib_free_multicast(&member->multicast);
> +		spin_lock_irq(&group->lock);
> +	}
> +
> +	join_state = get_leave_state(group);
> +	if (join_state) {
> +		group->rec.join_state &= ~join_state;
> +		spin_unlock_irq(&group->lock);
> +		if (send_leave(group, join_state))
> +			goto retest;
> +	} else {
> +		group->state = MCAST_IDLE;
> +		spin_unlock_irq(&group->lock);
> +		release_group(group);
> +	}
> +}
> +
> +/*
> + * Fail a join request if it is still active - at the head of the pending queue.
> + */
> +static void process_join_error(struct mcast_group *group, int status)
> +{
> +	struct mcast_member *member;
> +	int ret;
> +
> +	spin_lock_irq(&group->lock);
> +	member = list_entry(group->pending_list.next,
> +			    struct mcast_member, list);
> +	if (group->last_join == member) {
> +		atomic_inc(&member->refcount);
> +		list_del_init(&member->list);
> +		spin_unlock_irq(&group->lock);
> +		ret = member->multicast.callback(status, &member->multicast);
> +		deref_member(member);
> +		if (ret)
> +			ib_free_multicast(&member->multicast);
> +	} else
> +		spin_unlock_irq(&group->lock);
> +}
> +
> +static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
> +			 void *context)
> +{
> +	struct mcast_group *group = context;
> +
> +	if (status)
> +		process_join_error(group, status);
> +	else {
> +		spin_lock_irq(&group->lock);
> +		group->rec = *rec;
> +		spin_unlock_irq(&group->lock);
> +	}
> +	mcast_work_handler(group);
> +}
> +
> +static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
> +			  void *context)
> +{
> +	mcast_work_handler(context);
> +}
> +
> +static struct mcast_group *acquire_group(struct mcast_port *port,
> +					 union ib_gid *mgid, gfp_t gfp_mask)
> +{
> +	struct mcast_group *group, *cur_group;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&port->lock, flags);
> +	group = mcast_find(port, mgid);
> +	if (group)
> +		goto found;
> +	spin_unlock_irqrestore(&port->lock, flags);
> +
> +	group = kzalloc(sizeof *group, gfp_mask);
> +	if (!group)
> +		return NULL;
> +
> +	group->port = port;
> +	group->rec.mgid = *mgid;
> +	INIT_LIST_HEAD(&group->pending_list);
> +	INIT_WORK(&group->work, mcast_work_handler, group);
> +	spin_lock_init(&group->lock);
> +
> +	spin_lock_irqsave(&port->lock, flags);
> +	cur_group = mcast_insert(port, group);
> +	if (cur_group) {
> +		kfree(group);
> +		group = cur_group;
> +	} else
> +		atomic_inc(&port->refcount);
> +found:
> +	atomic_inc(&group->refcount);
> +	spin_unlock_irqrestore(&port->lock, flags);
> +	return group;
> +}
> +
> +/*
> + * We serialize all join requests to a single group to make our lives much
> + * easier.  Otherwise, two users could try to join the same group
> + * simultaneously, with different configurations, one could leave while the
> + * join is in progress, etc., which makes locking around error recovery
> + * difficult.
> + */
> +struct ib_multicast *ib_join_multicast(struct ib_device *device, u8 port_num,
> +				       struct ib_sa_mcmember_rec *rec,
> +				       ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
> +				       int (*callback)(int status,
> +						       struct ib_multicast
> +							      *multicast),
> +				       void *context)
> +{
> +	struct mcast_device *dev;
> +	struct mcast_member *member;
> +	struct ib_multicast *multicast;
> +	int ret;
> +
> +	dev = ib_get_client_data(device, &mcast_client);
> +	if (!dev)
> +		return ERR_PTR(-ENODEV);
> +
> +	member = kzalloc(sizeof *member, gfp_mask);
> +	if (!member)
> +		return ERR_PTR(-ENOMEM);
> +
> +	member->multicast.rec = *rec;
> +	member->multicast.comp_mask = comp_mask;
> +	member->multicast.callback = callback;
> +	member->multicast.context = context;
> +	init_waitqueue_head(&member->wait);
> +	atomic_set(&member->refcount, 1);
> +	member->state = MCAST_JOINING;
> +
> +	member->group = acquire_group(&dev->port[port_num - 1],
> +				      &rec->mgid, gfp_mask);
> +	if (!member->group) {
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	/*
> +	 * The user will get the multicast structure in their callback.  They
> +	 * could then free the multicast structure before we can return from
> +	 * this routine.  So we save the pointer to return before queuing
> +	 * any callback.
> +	 */
> +	multicast = &member->multicast;
> +	queue_join(member);
> +	return multicast;
> +
> +err:
> +	kfree(member);
> +	return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL(ib_join_multicast);
> +
> +void ib_free_multicast(struct ib_multicast *multicast)
> +{
> +	struct mcast_member *member;
> +	struct mcast_group *group;
> +
> +	member = container_of(multicast, struct mcast_member, multicast);
> +	group = member->group;
> +
> +	spin_lock_irq(&group->lock);
> +	switch (member->state) {
> +	case MCAST_MEMBER:
> +		adjust_membership(group, multicast->rec.join_state, -1);
> +		break;
> +	case MCAST_JOINING:
> +		list_del_init(&member->list);
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	if (group->state == MCAST_IDLE) {
> +		group->state = MCAST_BUSY;
> +		spin_unlock_irq(&group->lock);
> +		/* Continue to hold reference on group until callback */
> +		queue_work(mcast_wq, &group->work);
> +	} else {
> +		spin_unlock_irq(&group->lock);
> +		release_group(group);
> +	}
> +
> +	atomic_dec(&member->refcount);
> +	wait_event(member->wait, !atomic_read(&member->refcount));
> +	kfree(member);
> +}
> +EXPORT_SYMBOL(ib_free_multicast);
> +
> +static void mcast_add_one(struct ib_device *device)
> +{
> +	struct mcast_device *dev;
> +	struct mcast_port *port;
> +	int i;
> +
> +	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
> +		return;
> +
> +	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
> +		      GFP_KERNEL);
> +	if (!dev)
> +		return;
> +
> +	for (i = 1; i <= device->phys_port_cnt; i++) {
> +		port = &dev->port[i - 1];
> +		port->dev = dev;
> +		port->port_num = i;
> +		spin_lock_init(&port->lock);
> +		port->table = RB_ROOT;
> +		init_waitqueue_head(&port->wait);
> +		atomic_set(&port->refcount, 1);
> +	}
> +
> +	dev->device = device;
> +	ib_set_client_data(device, &mcast_client, dev);
> +}
> +
> +/*
> + * Mark any existing groups as no longer having any members.  This will force
> + * cleanup of the groups when all outstanding leave requests complete.
> + */
> +static void leave_groups(struct mcast_port *port)
> +{
> +	struct mcast_group *group;
> +	struct rb_node *node;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&port->lock, flags);
> +	for (node = rb_first(&port->table); node; node = rb_next(node)) {
> +		group = rb_entry(node, struct mcast_group, node);
> +		group->rec.join_state = 0;
> +		ib_sa_cancel_query(group->query_id, group->query);
> +	}
> +	spin_unlock_irqrestore(&port->lock, flags);
> +}
> +
> +static void mcast_remove_one(struct ib_device *device)
> +{
> +	struct mcast_device *dev;
> +	struct mcast_port *port;
> +	int i;
> +
> +	dev = ib_get_client_data(device, &mcast_client);
> +	if (!dev)
> +		return;
> +
> +	flush_workqueue(mcast_wq);
> +
> +	for (i = 0; i < device->phys_port_cnt; i++) {
> +		port = &dev->port[i];
> +		leave_groups(port);
> +		atomic_dec(&port->refcount);
> +		wait_event(port->wait, !atomic_read(&port->refcount));
> +	}
> +
> +	kfree(dev);
> +}
> +
> +static int __init mcast_init(void)
> +{
> +	int ret;
> +
> +	mcast_wq = create_singlethread_workqueue("ib_mcast_wq");
> +	if (!mcast_wq)
> +		return -ENOMEM;
> +
> +	ret = ib_register_client(&mcast_client);
> +	if (ret)
> +		goto err;
> +	return 0;
> +
> +err:
> +	destroy_workqueue(mcast_wq);
> +	return ret;
> +}
> +
> +static void __exit mcast_cleanup(void)
> +{
> +	ib_unregister_client(&mcast_client);
> +	destroy_workqueue(mcast_wq);
> +}
> +
> +module_init(mcast_init);
> +module_exit(mcast_cleanup);
> Index: core/Makefile
> ===================================================================
> --- core/Makefile	(revision 6230)
> +++ core/Makefile	(working copy)
> @@ -5,7 +5,7 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRA
>  
>  obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_ping.o ib_cm.o \
>  					ib_sa.o ib_at.o $(infiniband-y) \
> -					findex.o
> +					findex.o ib_multicast.o
>  obj-$(CONFIG_INFINIBAND_USER_MAD) += 	ib_umad.o
>  obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o ib_uat.o $(user_access-y)
>  
> @@ -30,6 +30,8 @@ ib_sa-y :=			sa_query.o
>  
>  ib_local_sa-y :=		local_sa.o
>  
> +ib_multicast-y :=		multicast.o
> +
>  ib_umad-y :=			user_mad.o
>  
>  ib_uverbs-y :=			uverbs_main.o uverbs_cmd.o uverbs_mem.o \
> 