[openib-general] [RFC] [PATCH 2/2] for 2.6.21/OFED1.2 rdma_cm: add multicast support
Steve Wise
swise at opengridcomputing.com
Sat Jan 27 08:00:39 PST 2007
Sean, were you able to try this with an iWARP device to check for
regressions?
----- Original Message -----
From: "Sean Hefty" <sean.hefty at intel.com>
To: "Hefty, Sean" <sean.hefty at intel.com>; <openib-general at openib.org>;
"'Roland Dreier'" <rdreier at cisco.com>
Cc: "'Or Gerlitz'" <ogerlitz at voltaire.com>
Sent: Friday, January 26, 2007 6:19 PM
Subject: [openib-general] [RFC] [PATCH 2/2] for 2.6.21/OFED1.2 rdma_cm: add multicast support
> Extend the rdma_cm to support multicast communication. Multicast
> support is added to the existing RDMA_PS_UDP port space, as well as
> to a new port space, RDMA_PS_IPOIB. The latter port space allows
> joining the multicast groups used by ipoib, which enables offloading
> ipoib traffic to a separate QP. The port space determines the
> signature used in the MGID when joining the group. The newly added
> RDMA_PS_IPOIB port space also allows for unicast operations.
>
> Supporting RDMA_PS_IPOIB requires changing how UD QPs are initialized,
> since we can no longer assume that the qkey is constant. This means
> saving the qkey to use when attaching to a device, so that it is
> available when creating the QP. The qkey information is exported to
> the user through the existing rdma_init_qp_attr() routine.
>
> Multicast support is exported to userspace through the rdma_ucm.
>
> Signed-off-by: Sean Hefty <sean.hefty at intel.com>
> ---
> Changes from previous patches include adding the RDMA_PS_IPOIB port
> space.
>
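For reviewers who want to see the new kernel API in action, here is a
minimal sketch. The helper name, group address, and context pointer are
illustrative only, and it assumes the id's source address has already
been bound or resolved, which rdma_join_multicast() requires:

    #include <rdma/rdma_cm.h>
    #include <linux/in.h>

    /* Sketch: join an IPv4 multicast group on an rdma_cm_id.  The join
     * completes asynchronously: the id's event handler receives
     * RDMA_CM_EVENT_MULTICAST_JOIN (or _ERROR), with the AH attributes,
     * remote QPN (0xFFFFFF), and qkey delivered in event->param.ud.
     */
    static int example_join_group(struct rdma_cm_id *id, void *user_ctx)
    {
            struct sockaddr_in mcast = {
                    .sin_family = AF_INET,
                    .sin_addr.s_addr = htonl(0xE0010203), /* 224.1.2.3 */
            };

            return rdma_join_multicast(id, (struct sockaddr *) &mcast,
                                       user_ctx);
    }

Leaving the group takes the same address bytes that were passed to the
join; rdma_leave_multicast() locates the membership by memcmp() against
the stored sockaddr, so pass an identical structure to both calls.
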
> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
> index 9e0ab04..827df2a 100644
> --- a/drivers/infiniband/core/cma.c
> +++ b/drivers/infiniband/core/cma.c
> @@ -71,6 +71,7 @@ static struct workqueue_struct *cma_wq;
> static DEFINE_IDR(sdp_ps);
> static DEFINE_IDR(tcp_ps);
> static DEFINE_IDR(udp_ps);
> +static DEFINE_IDR(ipoib_ps);
>
> struct cma_device {
> struct list_head list;
> @@ -115,6 +116,7 @@ struct rdma_id_private {
> struct list_head list;
> struct list_head listen_list;
> struct cma_device *cma_dev;
> + struct list_head mc_list;
>
> enum cma_state state;
> spinlock_t lock;
> @@ -133,10 +135,23 @@ struct rdma_id_private {
> } cm_id;
>
> u32 seq_num;
> + u32 qkey;
> u32 qp_num;
> u8 srq;
> };
>
> +struct cma_multicast {
> + struct rdma_id_private *id_priv;
> + union {
> + struct ib_sa_multicast *ib;
> + } multicast;
> + struct list_head list;
> + void *context;
> + struct sockaddr addr;
> + u8 pad[sizeof(struct sockaddr_in6) -
> + sizeof(struct sockaddr)];
> +};
> +
> struct cma_work {
> struct work_struct work;
> struct rdma_id_private *id;
> @@ -242,6 +257,11 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
> hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
> }
>
> +static inline int cma_is_ud_ps(enum rdma_port_space ps)
> +{
> + return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
> +}
> +
> static void cma_attach_to_dev(struct rdma_id_private *id_priv,
> struct cma_device *cma_dev)
> {
> @@ -264,19 +284,41 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
> id_priv->cma_dev = NULL;
> }
>
> +static int cma_set_qkey(struct ib_device *device, u8 port_num,
> + enum rdma_port_space ps,
> + struct rdma_dev_addr *dev_addr, u32 *qkey)
> +{
> + struct ib_sa_mcmember_rec rec;
> + int ret = 0;
> +
> + switch (ps) {
> + case RDMA_PS_UDP:
> + *qkey = RDMA_UDP_QKEY;
> + break;
> + case RDMA_PS_IPOIB:
> + ib_addr_get_mgid(dev_addr, &rec.mgid);
> + ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
> + *qkey = be32_to_cpu(rec.qkey);
> + break;
> + default:
> + break;
> + }
> + return ret;
> +}
> +
> static int cma_acquire_dev(struct rdma_id_private *id_priv)
> {
> - enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
> + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
> struct cma_device *cma_dev;
> union ib_gid gid;
> int ret = -ENODEV;
>
> - switch (rdma_node_get_transport(dev_type)) {
> + switch (rdma_node_get_transport(dev_addr->dev_type)) {
> case RDMA_TRANSPORT_IB:
> - ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
> + ib_addr_get_sgid(dev_addr, &gid);
> break;
> case RDMA_TRANSPORT_IWARP:
> - iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
> + iw_addr_get_sgid(dev_addr, &gid);
> break;
> default:
> return -ENODEV;
> @@ -286,7 +328,12 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
> ret = ib_find_cached_gid(cma_dev->device, &gid,
> &id_priv->id.port_num, NULL);
> if (!ret) {
> - cma_attach_to_dev(id_priv, cma_dev);
> + ret = cma_set_qkey(cma_dev->device,
> + id_priv->id.port_num,
> + id_priv->id.ps, dev_addr,
> + &id_priv->qkey);
> + if (!ret)
> + cma_attach_to_dev(id_priv, cma_dev);
> break;
> }
> }
> @@ -324,40 +371,50 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
> init_waitqueue_head(&id_priv->wait_remove);
> atomic_set(&id_priv->dev_remove, 0);
> INIT_LIST_HEAD(&id_priv->listen_list);
> + INIT_LIST_HEAD(&id_priv->mc_list);
> get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
>
> return &id_priv->id;
> }
> EXPORT_SYMBOL(rdma_create_id);
>
> -static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
> +static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
> {
> struct ib_qp_attr qp_attr;
> - struct rdma_dev_addr *dev_addr;
> - int ret;
> + int qp_attr_mask, ret;
>
> - dev_addr = &id_priv->id.route.addr.dev_addr;
> - ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
> - ib_addr_get_pkey(dev_addr),
> - &qp_attr.pkey_index);
> + qp_attr.qp_state = IB_QPS_INIT;
> + ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
> if (ret)
> return ret;
>
> - qp_attr.qp_state = IB_QPS_INIT;
> - qp_attr.qp_access_flags = 0;
> - qp_attr.port_num = id_priv->id.port_num;
> - return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
> - IB_QP_PKEY_INDEX | IB_QP_PORT);
> + ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
> + if (ret)
> + return ret;
> +
> + qp_attr.qp_state = IB_QPS_RTR;
> + ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
> + if (ret)
> + return ret;
> +
> + qp_attr.qp_state = IB_QPS_RTS;
> + qp_attr.sq_psn = 0;
> + ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
> +
> + return ret;
> }
>
> -static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
> +static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
> {
> struct ib_qp_attr qp_attr;
> + int qp_attr_mask, ret;
>
> qp_attr.qp_state = IB_QPS_INIT;
> - qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
> + ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
> + if (ret)
> + return ret;
>
> - return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
> + return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
> }
>
> int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
> @@ -375,18 +432,10 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
> if (IS_ERR(qp))
> return PTR_ERR(qp);
>
> - switch (rdma_node_get_transport(id->device->node_type)) {
> - case RDMA_TRANSPORT_IB:
> - ret = cma_init_ib_qp(id_priv, qp);
> - break;
> - case RDMA_TRANSPORT_IWARP:
> - ret = cma_init_iw_qp(id_priv, qp);
> - break;
> - default:
> - ret = -ENOSYS;
> - break;
> - }
> -
> + if (cma_is_ud_ps(id_priv->id.ps))
> + ret = cma_init_ud_qp(id_priv, qp);
> + else
> + ret = cma_init_conn_qp(id_priv, qp);
> if (ret)
> goto err;
>
> @@ -459,23 +508,55 @@ static int cma_modify_qp_err(struct rdma_cm_id *id)
> return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
> }
>
> +static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
> + struct ib_qp_attr *qp_attr, int *qp_attr_mask)
> +{
> + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
> + int ret;
> +
> + ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
> + ib_addr_get_pkey(dev_addr),
> + &qp_attr->pkey_index);
> + if (ret)
> + return ret;
> +
> + qp_attr->port_num = id_priv->id.port_num;
> + *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
> +
> + if (cma_is_ud_ps(id_priv->id.ps)) {
> + qp_attr->qkey = id_priv->qkey;
> + *qp_attr_mask |= IB_QP_QKEY;
> + } else {
> + qp_attr->qp_access_flags = 0;
> + *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
> + }
> + return 0;
> +}
> +
> int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
> int *qp_attr_mask)
> {
> struct rdma_id_private *id_priv;
> - int ret;
> + int ret = 0;
>
> id_priv = container_of(id, struct rdma_id_private, id);
> switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
> case RDMA_TRANSPORT_IB:
> - ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
> - qp_attr_mask);
> + if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
> + ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
> + else
> + ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
> + qp_attr_mask);
> if (qp_attr->qp_state == IB_QPS_RTR)
> qp_attr->rq_psn = id_priv->seq_num;
> break;
> case RDMA_TRANSPORT_IWARP:
> - ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
> - qp_attr_mask);
> + if (!id_priv->cm_id.iw) {
> + qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
> + *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
> + } else
> + ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
> + qp_attr_mask);
> break;
> default:
> ret = -ENOSYS;
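
A side note on the qkey export mentioned in the changelog: with this
change, a ULP that owns its QP (rather than using rdma_create_qp()) can
retrieve the resolved qkey through rdma_init_qp_attr(). A hedged
sketch, with an illustrative helper name:

    /* Sketch: initialize a caller-owned UD QP with the attributes the
     * rdma_cm resolved for this id.  For UD port spaces the returned
     * mask covers IB_QP_STATE, IB_QP_PKEY_INDEX, IB_QP_PORT and
     * IB_QP_QKEY.
     */
    static int example_ud_qp_init(struct rdma_cm_id *id, struct ib_qp *qp)
    {
            struct ib_qp_attr attr;
            int mask, ret;

            attr.qp_state = IB_QPS_INIT;
            ret = rdma_init_qp_attr(id, &attr, &mask);
            if (ret)
                    return ret;

            return ib_modify_qp(qp, &attr, mask);
    }
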
> @@ -697,6 +778,19 @@ static void cma_release_port(struct rdma_id_private *id_priv)
> mutex_unlock(&lock);
> }
>
> +static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
> +{
> + struct cma_multicast *mc;
> +
> + while (!list_empty(&id_priv->mc_list)) {
> + mc = container_of(id_priv->mc_list.next,
> + struct cma_multicast, list);
> + list_del(&mc->list);
> + ib_sa_free_multicast(mc->multicast.ib);
> + kfree(mc);
> + }
> +}
> +
> void rdma_destroy_id(struct rdma_cm_id *id)
> {
> struct rdma_id_private *id_priv;
> @@ -721,6 +815,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
> default:
> break;
> }
> + cma_leave_mc_groups(id_priv);
> mutex_lock(&lock);
> cma_detach_from_dev(id_priv);
> }
> @@ -971,7 +1066,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
> memset(&event, 0, sizeof event);
> offset = cma_user_data_offset(listen_id->id.ps);
> event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
> - if (listen_id->id.ps == RDMA_PS_UDP) {
> + if (cma_is_ud_ps(listen_id->id.ps)) {
> conn_id = cma_new_udp_id(&listen_id->id, ib_event);
> event.param.ud.private_data = ib_event->private_data + offset;
> event.param.ud.private_data_len =
> IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
> @@ -1805,6 +1900,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
> case RDMA_PS_UDP:
> ps = &udp_ps;
> break;
> + case RDMA_PS_IPOIB:
> + ps = &ipoib_ps;
> + break;
> default:
> return -EPROTONOSUPPORT;
> }
> @@ -1919,7 +2017,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
> event.status = ib_event->param.sidr_rep_rcvd.status;
> break;
> }
> - if (rep->qkey != RDMA_UD_QKEY) {
> + if (id_priv->qkey != rep->qkey) {
> event.event = RDMA_CM_EVENT_UNREACHABLE;
> event.status = -EINVAL;
> break;
> @@ -2118,7 +2216,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>
> switch (rdma_node_get_transport(id->device->node_type)) {
> case RDMA_TRANSPORT_IB:
> - if (id->ps == RDMA_PS_UDP)
> + if (cma_is_ud_ps(id->ps))
> ret = cma_resolve_ib_udp(id_priv, conn_param);
> else
> ret = cma_connect_ib(id_priv, conn_param);
> @@ -2214,7 +2312,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
> rep.status = status;
> if (status == IB_SIDR_SUCCESS) {
> rep.qp_num = id_priv->qp_num;
> - rep.qkey = RDMA_UD_QKEY;
> + rep.qkey = id_priv->qkey;
> }
> rep.private_data = private_data;
> rep.private_data_len = private_data_len;
> @@ -2238,7 +2336,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
>
> switch (rdma_node_get_transport(id->device->node_type)) {
> case RDMA_TRANSPORT_IB:
> - if (id->ps == RDMA_PS_UDP)
> + if (cma_is_ud_ps(id->ps))
> ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
> conn_param->private_data,
> conn_param->private_data_len);
> @@ -2299,7 +2397,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
>
> switch (rdma_node_get_transport(id->device->node_type)) {
> case RDMA_TRANSPORT_IB:
> - if (id->ps == RDMA_PS_UDP)
> + if (cma_is_ud_ps(id->ps))
> ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
> private_data, private_data_len);
> else
> @@ -2350,6 +2448,161 @@ out:
> }
> EXPORT_SYMBOL(rdma_disconnect);
>
> +static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
> +{
> + struct rdma_id_private *id_priv;
> + struct cma_multicast *mc = multicast->context;
> + struct rdma_cm_event event;
> + int ret;
> +
> + id_priv = mc->id_priv;
> + atomic_inc(&id_priv->dev_remove);
> + if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
> + !cma_comp(id_priv, CMA_ADDR_RESOLVED))
> + goto out;
> +
> + if (!status && id_priv->id.qp)
> + status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
> + multicast->rec.mlid);
> +
> + memset(&event, 0, sizeof event);
> + event.status = status;
> + event.param.ud.private_data = mc->context;
> + if (!status) {
> + event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
> + ib_init_ah_from_mcmember(id_priv->id.device,
> + id_priv->id.port_num, &multicast->rec,
> + &event.param.ud.ah_attr);
> + event.param.ud.qp_num = 0xFFFFFF;
> + event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
> + } else
> + event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
> +
> + ret = id_priv->id.event_handler(&id_priv->id, &event);
> + if (ret) {
> + cma_exch(id_priv, CMA_DESTROYING);
> + cma_release_remove(id_priv);
> + rdma_destroy_id(&id_priv->id);
> + return 0;
> + }
> +out:
> + cma_release_remove(id_priv);
> + return 0;
> +}
> +
> +static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
> + struct cma_multicast *mc)
> +{
> + struct ib_sa_mcmember_rec rec;
> + unsigned char mc_map[MAX_ADDR_LEN];
> + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
> + struct sockaddr_in *sin = (struct sockaddr_in *) &mc->addr;
> + ib_sa_comp_mask comp_mask;
> + int ret;
> +
> + ib_addr_get_mgid(dev_addr, &rec.mgid);
> + ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
> + &rec.mgid, &rec);
> + if (ret)
> + return ret;
> +
> + ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
> + if (id_priv->id.ps == RDMA_PS_UDP) {
> + mc_map[7] = 0x01; /* Use RDMA CM signature */
> + rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
> + }
> + mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
> + mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
> +
> + rec.mgid = *(union ib_gid *) (mc_map + 4);
> + ib_addr_get_sgid(dev_addr, &rec.port_gid);
> + rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
> + rec.join_state = 1;
> +
> + comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
> + IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
> + IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
> + IB_SA_MCMEMBER_REC_FLOW_LABEL |
> + IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
> +
> + mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
> + id_priv->id.port_num, &rec,
> + comp_mask, GFP_KERNEL,
> + cma_ib_mc_handler, mc);
> + if (IS_ERR(mc->multicast.ib))
> + return PTR_ERR(mc->multicast.ib);
> +
> + return 0;
> +}
> +
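
For readers tracing the MGID construction above: ip_ib_mc_map() produces
the standard IPoIB mapping, whose GID begins ff12:401b (0x1b being the
IPv4 signature byte at mc_map[7]). Overwriting that byte with 0x01 for
RDMA_PS_UDP yields a distinct ff12:4001:<pkey>... group, while
RDMA_PS_IPOIB leaves the signature alone, so its joins land on the same
groups that ipoib itself uses, which is what makes the QP offload
possible.
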
> +int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
> + void *context)
> +{
> + struct rdma_id_private *id_priv;
> + struct cma_multicast *mc;
> + int ret;
> +
> + id_priv = container_of(id, struct rdma_id_private, id);
> + if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
> + !cma_comp(id_priv, CMA_ADDR_RESOLVED))
> + return -EINVAL;
> +
> + mc = kmalloc(sizeof *mc, GFP_KERNEL);
> + if (!mc)
> + return -ENOMEM;
> +
> + memcpy(&mc->addr, addr, ip_addr_size(addr));
> + mc->context = context;
> + mc->id_priv = id_priv;
> +
> + spin_lock(&id_priv->lock);
> + list_add(&mc->list, &id_priv->mc_list);
> + spin_unlock(&id_priv->lock);
> +
> + switch (rdma_node_get_transport(id->device->node_type)) {
> + case RDMA_TRANSPORT_IB:
> + ret = cma_join_ib_multicast(id_priv, mc);
> + break;
> + default:
> + ret = -ENOSYS;
> + break;
> + }
> +
> + if (ret) {
> + spin_lock_irq(&id_priv->lock);
> + list_del(&mc->list);
> + spin_unlock_irq(&id_priv->lock);
> + kfree(mc);
> + }
> + return ret;
> +}
> +EXPORT_SYMBOL(rdma_join_multicast);
> +
> +void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
> +{
> + struct rdma_id_private *id_priv;
> + struct cma_multicast *mc;
> +
> + id_priv = container_of(id, struct rdma_id_private, id);
> + spin_lock_irq(&id_priv->lock);
> + list_for_each_entry(mc, &id_priv->mc_list, list) {
> + if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
> + list_del(&mc->list);
> + spin_unlock_irq(&id_priv->lock);
> +
> + if (id->qp)
> + ib_detach_mcast(id->qp,
> + &mc->multicast.ib->rec.mgid,
> + mc->multicast.ib->rec.mlid);
> + ib_sa_free_multicast(mc->multicast.ib);
> + kfree(mc);
> + return;
> + }
> + }
> + spin_unlock_irq(&id_priv->lock);
> +}
> +EXPORT_SYMBOL(rdma_leave_multicast);
> +
> static void cma_add_one(struct ib_device *device)
> {
> struct cma_device *cma_dev;
> @@ -2476,6 +2729,7 @@ static void cma_cleanup(void)
> idr_destroy(&sdp_ps);
> idr_destroy(&tcp_ps);
> idr_destroy(&udp_ps);
> + idr_destroy(&ipoib_ps);
> }
>
> module_init(cma_init);
> diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
> index e2e8d32..f8d117a 100644
> --- a/drivers/infiniband/core/ucma.c
> +++ b/drivers/infiniband/core/ucma.c
> @@ -70,10 +70,24 @@ struct ucma_context {
> u64 uid;
>
> struct list_head list;
> + struct list_head mc_list;
> +};
> +
> +struct ucma_multicast {
> + struct ucma_context *ctx;
> + int id;
> + int events_reported;
> +
> + u64 uid;
> + struct list_head list;
> + struct sockaddr addr;
> + u8 pad[sizeof(struct sockaddr_in6) -
> + sizeof(struct sockaddr)];
> };
>
> struct ucma_event {
> struct ucma_context *ctx;
> + struct ucma_multicast *mc;
> struct list_head list;
> struct rdma_cm_id *cm_id;
> struct rdma_ucm_event_resp resp;
> @@ -81,6 +95,7 @@ struct ucma_event {
>
> static DEFINE_MUTEX(mut);
> static DEFINE_IDR(ctx_idr);
> +static DEFINE_IDR(multicast_idr);
>
> static inline struct ucma_context *_ucma_find_context(int id,
> struct ucma_file *file)
> @@ -124,6 +139,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
>
> atomic_set(&ctx->ref, 1);
> init_completion(&ctx->comp);
> + INIT_LIST_HEAD(&ctx->mc_list);
> ctx->file = file;
>
> do {
> @@ -147,6 +163,37 @@ error:
> return NULL;
> }
>
> +static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
> +{
> + struct ucma_multicast *mc;
> + int ret;
> +
> + mc = kzalloc(sizeof(*mc), GFP_KERNEL);
> + if (!mc)
> + return NULL;
> +
> + do {
> + ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
> + if (!ret)
> + goto error;
> +
> + mutex_lock(&mut);
> + ret = idr_get_new(&multicast_idr, mc, &mc->id);
> + mutex_unlock(&mut);
> + } while (ret == -EAGAIN);
> +
> + if (ret)
> + goto error;
> +
> + mc->ctx = ctx;
> + list_add_tail(&mc->list, &ctx->mc_list);
> + return mc;
> +
> +error:
> + kfree(mc);
> + return NULL;
> +}
> +
> static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
> struct rdma_conn_param *src)
> {
> @@ -180,8 +227,19 @@ static void ucma_set_event_context(struct ucma_context *ctx,
> struct ucma_event *uevent)
> {
> uevent->ctx = ctx;
> - uevent->resp.uid = ctx->uid;
> - uevent->resp.id = ctx->id;
> + switch (event->event) {
> + case RDMA_CM_EVENT_MULTICAST_JOIN:
> + case RDMA_CM_EVENT_MULTICAST_ERROR:
> + uevent->mc = (struct ucma_multicast *)
> + event->param.ud.private_data;
> + uevent->resp.uid = uevent->mc->uid;
> + uevent->resp.id = uevent->mc->id;
> + break;
> + default:
> + uevent->resp.uid = ctx->uid;
> + uevent->resp.id = ctx->id;
> + break;
> + }
> }
>
> static int ucma_event_handler(struct rdma_cm_id *cm_id,
> @@ -199,7 +257,7 @@ static int ucma_event_handler(struct rdma_cm_id
> *cm_id,
> ucma_set_event_context(ctx, event, uevent);
> uevent->resp.event = event->event;
> uevent->resp.status = event->status;
> - if (cm_id->ps == RDMA_PS_UDP)
> + if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
> ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
> else
> ucma_copy_conn_event(&uevent->resp.param.conn,
> @@ -290,6 +348,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
>
> list_del(&uevent->list);
> uevent->ctx->events_reported++;
> + if (uevent->mc)
> + uevent->mc->events_reported++;
> kfree(uevent);
> done:
> mutex_unlock(&file->mut);
> @@ -342,6 +402,19 @@ err1:
> return ret;
> }
>
> +static void ucma_cleanup_multicast(struct ucma_context *ctx)
> +{
> + struct ucma_multicast *mc, *tmp;
> +
> + mutex_lock(&mut);
> + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
> + list_del(&mc->list);
> + idr_remove(&multicast_idr, mc->id);
> + kfree(mc);
> + }
> + mutex_unlock(&mut);
> +}
> +
> static void ucma_cleanup_events(struct ucma_context *ctx)
> {
> struct ucma_event *uevent, *tmp;
> @@ -360,6 +433,19 @@ static void ucma_cleanup_events(struct ucma_context *ctx)
> }
> }
>
> +static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
> +{
> + struct ucma_event *uevent, *tmp;
> +
> + list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
> + if (uevent->mc != mc)
> + continue;
> +
> + list_del(&uevent->list);
> + kfree(uevent);
> + }
> +}
> +
> static int ucma_free_ctx(struct ucma_context *ctx)
> {
> int events_reported;
> @@ -367,6 +453,8 @@ static int ucma_free_ctx(struct ucma_context *ctx)
> /* No new events will be generated after destroying the id. */
> rdma_destroy_id(ctx->cm_id);
>
> + ucma_cleanup_multicast(ctx);
> +
> /* Cleanup events not yet reported to the user. */
> mutex_lock(&ctx->file->mut);
> ucma_cleanup_events(ctx);
> @@ -731,6 +819,114 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
> return ret;
> }
>
> +static ssize_t ucma_join_multicast(struct ucma_file *file,
> + const char __user *inbuf,
> + int in_len, int out_len)
> +{
> + struct rdma_ucm_join_mcast cmd;
> + struct rdma_ucm_create_id_resp resp;
> + struct ucma_context *ctx;
> + struct ucma_multicast *mc;
> + int ret;
> +
> + if (out_len < sizeof(resp))
> + return -ENOSPC;
> +
> + if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
> + return -EFAULT;
> +
> + ctx = ucma_get_ctx(file, cmd.id);
> + if (IS_ERR(ctx))
> + return PTR_ERR(ctx);
> +
> + mutex_lock(&file->mut);
> + mc = ucma_alloc_multicast(ctx);
> + if (IS_ERR(mc)) {
> + ret = PTR_ERR(mc);
> + goto err1;
> + }
> +
> + mc->uid = cmd.uid;
> + memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
> + ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
> + if (ret)
> + goto err2;
> +
> + resp.id = mc->id;
> + if (copy_to_user((void __user *)(unsigned long)cmd.response,
> + &resp, sizeof(resp))) {
> + ret = -EFAULT;
> + goto err3;
> + }
> +
> + mutex_unlock(&file->mut);
> + ucma_put_ctx(ctx);
> + return 0;
> +
> +err3:
> + rdma_leave_multicast(ctx->cm_id, &mc->addr);
> + ucma_cleanup_mc_events(mc);
> +err2:
> + mutex_lock(&mut);
> + idr_remove(&multicast_idr, mc->id);
> + mutex_unlock(&mut);
> + list_del(&mc->list);
> + kfree(mc);
> +err1:
> + mutex_unlock(&file->mut);
> + ucma_put_ctx(ctx);
> + return ret;
> +}
> +
> +static ssize_t ucma_leave_multicast(struct ucma_file *file,
> + const char __user *inbuf,
> + int in_len, int out_len)
> +{
> + struct rdma_ucm_destroy_id cmd;
> + struct rdma_ucm_destroy_id_resp resp;
> + struct ucma_multicast *mc;
> + int ret = 0;
> +
> + if (out_len < sizeof(resp))
> + return -ENOSPC;
> +
> + if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
> + return -EFAULT;
> +
> + mutex_lock(&mut);
> + mc = idr_find(&multicast_idr, cmd.id);
> + if (!mc)
> + mc = ERR_PTR(-ENOENT);
> + else if (mc->ctx->file != file)
> + mc = ERR_PTR(-EINVAL);
> + else {
> + idr_remove(&multicast_idr, mc->id);
> + atomic_inc(&mc->ctx->ref);
> + }
> + mutex_unlock(&mut);
> +
> + if (IS_ERR(mc)) {
> + ret = PTR_ERR(mc);
> + goto out;
> + }
> +
> + rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
> + mutex_lock(&mc->ctx->file->mut);
> + ucma_cleanup_mc_events(mc);
> + list_del(&mc->list);
> + mutex_unlock(&mc->ctx->file->mut);
> +
> + ucma_put_ctx(mc->ctx);
> + resp.events_reported = mc->events_reported;
> + kfree(mc);
> +
> + if (copy_to_user((void __user *)(unsigned long)cmd.response,
> + &resp, sizeof(resp)))
> + ret = -EFAULT;
> +out:
> + return ret;
> +}
> +
> static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
> const char __user *inbuf,
> int in_len, int out_len) = {
> @@ -750,6 +946,8 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
> [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
> [RDMA_USER_CM_CMD_SET_OPTION] = NULL,
> [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
> + [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
> + [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
> };
>
> static ssize_t ucma_write(struct file *filp, const char __user *buf,
> diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
> index 36cd8a8..2d6a770 100644
> --- a/include/rdma/rdma_cm.h
> +++ b/include/rdma/rdma_cm.h
> @@ -52,10 +52,13 @@ enum rdma_cm_event_type {
> RDMA_CM_EVENT_ESTABLISHED,
> RDMA_CM_EVENT_DISCONNECTED,
> RDMA_CM_EVENT_DEVICE_REMOVAL,
> + RDMA_CM_EVENT_MULTICAST_JOIN,
> + RDMA_CM_EVENT_MULTICAST_ERROR
> };
>
> enum rdma_port_space {
> RDMA_PS_SDP = 0x0001,
> + RDMA_PS_IPOIB = 0x0002,
> RDMA_PS_TCP = 0x0106,
> RDMA_PS_UDP = 0x0111,
> RDMA_PS_SCTP = 0x0183
> @@ -294,5 +297,21 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
> */
> int rdma_disconnect(struct rdma_cm_id *id);
>
> -#endif /* RDMA_CM_H */
> +/**
> + * rdma_join_multicast - Join the multicast group specified by the given
> + * address.
> + * @id: Communication identifier associated with the request.
> + * @addr: Multicast address identifying the group to join.
> + * @context: User-defined context associated with the join request, returned
> + * to the user through the private_data pointer in multicast events.
> + */
> +int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
> + void *context);
>
> +/**
> + * rdma_leave_multicast - Leave the multicast group specified by the given
> + * address.
> + */
> +void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr);
> +
> +#endif /* RDMA_CM_H */
> diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
> index 9b176df..950424b 100644
> --- a/include/rdma/rdma_cm_ib.h
> +++ b/include/rdma/rdma_cm_ib.h
> @@ -44,7 +44,7 @@
> int rdma_set_ib_paths(struct rdma_cm_id *id,
> struct ib_sa_path_rec *path_rec, int num_paths);
>
> -/* Global qkey for UD QPs and multicast groups. */
> -#define RDMA_UD_QKEY 0x01234567
> +/* Global qkey for UDP QPs and multicast groups. */
> +#define RDMA_UDP_QKEY 0x01234567
>
> #endif /* RDMA_CM_IB_H */
> diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
> index 9572ab8..f632b0c 100644
> --- a/include/rdma/rdma_user_cm.h
> +++ b/include/rdma/rdma_user_cm.h
> @@ -38,7 +38,7 @@
> #include <rdma/ib_user_verbs.h>
> #include <rdma/ib_user_sa.h>
>
> -#define RDMA_USER_CM_ABI_VERSION 3
> +#define RDMA_USER_CM_ABI_VERSION 4
>
> #define RDMA_MAX_PRIVATE_DATA 256
>
> @@ -58,7 +58,9 @@ enum {
> RDMA_USER_CM_CMD_GET_EVENT,
> RDMA_USER_CM_CMD_GET_OPTION,
> RDMA_USER_CM_CMD_SET_OPTION,
> - RDMA_USER_CM_CMD_NOTIFY
> + RDMA_USER_CM_CMD_NOTIFY,
> + RDMA_USER_CM_CMD_JOIN_MCAST,
> + RDMA_USER_CM_CMD_LEAVE_MCAST
> };
>
> /*
> @@ -188,6 +190,13 @@ struct rdma_ucm_notify {
> __u32 event;
> };
>
> +struct rdma_ucm_join_mcast {
> + __u64 response; /* rdma_ucm_create_id_resp */
> + __u64 uid;
> + struct sockaddr_in6 addr;
> + __u32 id;
> +};
> +
> struct rdma_ucm_get_event {
> __u64 response;
> };
>
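
For completeness, a hedged sketch of driving the new ABI from userspace
without librdmacm (real applications should use the library wrapper; fd
is the open rdma_cm device file, and my_uid/ctx_id/group_addr are
illustrative):

    struct {
            struct rdma_ucm_cmd_hdr hdr;
            struct rdma_ucm_join_mcast cmd;
    } msg;
    struct rdma_ucm_create_id_resp resp;

    memset(&msg, 0, sizeof msg);
    msg.hdr.cmd = RDMA_USER_CM_CMD_JOIN_MCAST;
    msg.hdr.in = sizeof msg.cmd;          /* payload after the header */
    msg.hdr.out = sizeof resp;
    msg.cmd.response = (unsigned long) &resp;
    msg.cmd.uid = my_uid;                 /* echoed in multicast events */
    msg.cmd.id = ctx_id;                  /* ucma context of the cm_id */
    memcpy(&msg.cmd.addr, &group_addr, sizeof(struct sockaddr_in));

    if (write(fd, &msg, sizeof msg) == sizeof msg)
            /* resp.id now names this join for RDMA_USER_CM_CMD_LEAVE_MCAST */;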