[ofa-general] [PATCHv4 06/10] ib_core: CMA device binding
Eli Cohen
eli at dev.mellanox.co.il
Wed Aug 12 01:20:45 PDT 2009
On Mon, Aug 10, 2009 at 12:31:19PM -0700, Sean Hefty wrote:
> >@@ -576,10 +586,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
> > {
> > struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
> > int ret;
> >+ u16 pkey;
> >+
> >+ if (rdma_port_get_transport(id_priv->id.device, id_priv->id.port_num) ==
>
> nit: It looks like the if is indented by spaces, instead of a tab.
Will fix, thanks.
>
> >+static int cma_resolve_rdmaoe_route(struct rdma_id_private *id_priv)
> >+{
> >+ work->old_state = CMA_ROUTE_QUERY;
> >+ work->new_state = CMA_ROUTE_RESOLVED;
> >+ if (!route->path_rec->mtu || !route->path_rec->rate) {
> >+ work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
> >+ work->event.status = -1;
>
> Any reason not to fail immediately here and leave the id state unchanged?
No real reason. An immediate failure is just as good as a deferred one.
I will change that, and also change the rule to omit the
"!route->path_rec->rate" check.
>
> >+ } else {
> >+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
> >+ work->event.status = 0;
> >+ }
> >+
> >+ queue_work(cma_wq, &work->work);
> >+
> >+ kfree(mw);
> >+}
> >+
> >+static int cma_rdmaoe_join_multicast(struct rdma_id_private *id_priv,
> >+ struct cma_multicast *mc)
> >+{
> >+ struct rdmaoe_mcast_work *work;
> >+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
> >+
> >+ if (cma_zero_addr((struct sockaddr *)&mc->addr))
> >+ return -EINVAL;
> >+
> >+ work = kzalloc(sizeof *work, GFP_KERNEL);
> >+ if (!work)
> >+ return -ENOMEM;
> >+
> >+ mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
> >+ if (!mc->multicast.ib) {
> >+ kfree(work);
> >+ return -ENOMEM;
> >+ }
>
> nit: I'd prefer to goto a common cleanup area to make it easier to add changes
> in the future.
Will change that.
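Something like this (sketch):

	struct rdmaoe_mcast_work *work;
	int err;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto err1;
	}

	/* ... rest of the setup as in the patch ... */

	queue_work(cma_wq, &work->work);
	return 0;

err1:
	kfree(work);
	return err;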
>
> >+
> >+ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, &mc->multicast.ib->rec.mgid);
> >+ mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
> >+ if (id_priv->id.ps == RDMA_PS_UDP)
> >+ mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
> >+ mc->multicast.ib->rec.rate = rdmaoe_get_rate(dev_addr->src_dev);
> >+ mc->multicast.ib->rec.hop_limit = 1;
> >+ mc->multicast.ib->rec.mtu = rdmaoe_get_mtu(dev_addr->src_dev->mtu);
>
> Do we need to check the rate/mtu here, like in resolve route? Or should we be
> good since we could successfully resolve the route? Actually, can we just read
> the data from the path record that gets stored with the id?
>
I believe querying the MTU again to get an up-to-date value is better,
plus adding a check that the MTU is non-zero, returning immediately
with -EINVAL otherwise.
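That would look something like this (sketch, reusing the error labels
from the goto rework above):

	/* re-read the netdev MTU so we pick up an up-to-date value */
	mc->multicast.ib->rec.mtu = rdmaoe_get_mtu(dev_addr->src_dev->mtu);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto err2;
	}

with the error path extended to:

err2:
	kfree(mc->multicast.ib);
err1:
	kfree(work);
	return err;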
> >+ rdmaoe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
> >+ work->id = id_priv;
> >+ work->mc = mc;
> >+ INIT_WORK(&work->work, rdmaoe_mcast_work_handler);
> >+
> >+ queue_work(cma_wq, &work->work);
> >+
> >+ return 0;
> >+}
> >+
> > int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
> > void *context)
> > {
> >@@ -2782,6 +2918,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
> > case RDMA_TRANSPORT_IB:
> > ret = cma_join_ib_multicast(id_priv, mc);
> > break;
> >+ case RDMA_TRANSPORT_RDMAOE:
> >+ ret = cma_rdmaoe_join_multicast(id_priv, mc);
> >+ break;
> > default:
> > ret = -ENOSYS;
> > break;
> >@@ -2793,6 +2932,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
> > spin_unlock_irq(&id_priv->lock);
> > kfree(mc);
> > }
> >+
> > return ret;
> > }
> > EXPORT_SYMBOL(rdma_join_multicast);
> >>port_num)) {
> >+ tt = rdma_port_get_transport(ctx->cm_id->device, ctx->cm_id->port_num);
> >+ switch (tt) {
> > case RDMA_TRANSPORT_IB:
> >- ucma_copy_ib_route(&resp, &ctx->cm_id->route);
> >+ case RDMA_TRANSPORT_RDMAOE:
> >+ ucma_copy_ib_route(&resp, &ctx->cm_id->route, tt);
>
> It seems simpler to just add a new call ucma_copy_rdmaoe_route, rather than
> merging those two transports into a single copy function that then branches
> based on the transport.
Agree, will change.
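The split would look roughly like this (sketch; it mirrors the shape of
the existing ucma_copy_ib_route, so the exact fields copied for RDMAoE
may still change):

static void ucma_copy_rdmaoe_route(struct rdma_ucm_query_route_resp *resp,
				   struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

and the caller keeps a plain switch with one copy function per transport:

	switch (rdma_port_get_transport(ctx->cm_id->device, ctx->cm_id->port_num)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	case RDMA_TRANSPORT_RDMAOE:
		ucma_copy_rdmaoe_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}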
>
> > break;
> > default:
> > break;
> >diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
> >index 483057b..66a848e 100644
> >--- a/include/rdma/ib_addr.h
> >+++ b/include/rdma/ib_addr.h
> >@@ -39,6 +39,8 @@
> > #include <linux/netdevice.h>
> > #include <linux/socket.h>
> > #include <rdma/ib_verbs.h>
> >+#include <linux/ethtool.h>
> >+#include <rdma/ib_pack.h>
> >
> > struct rdma_addr_client {
> > atomic_t refcount;
> >@@ -157,4 +159,89 @@ static inline void iw_addr_get_dgid(struct rdma_dev_addr *dev_addr,
> > memcpy(gid, dev_addr->dst_dev_addr, sizeof *gid);
> > }
> >
> >+
> >+static inline int rdma_link_local_addr(struct in6_addr *addr)
> >+{
> >+ if (addr->s6_addr32[0] == cpu_to_be32(0xfe800000) &&
> >+ addr->s6_addr32[1] == 0)
> >+ return 1;
> >+ else
> >+ return 0;
> >+}
>
> just replace the 'if' with 'return'
Will do.
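i.e.:

static inline int rdma_link_local_addr(struct in6_addr *addr)
{
	return addr->s6_addr32[0] == cpu_to_be32(0xfe800000) &&
	       addr->s6_addr32[1] == 0;
}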
>
> >+
> >+static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
> >+{
> >+ memcpy(mac, &addr->s6_addr[8], 3);
> >+ memcpy(mac + 3, &addr->s6_addr[13], 3);
> >+ mac[0] ^= 2;
> >+}
> >+
> >+static inline int rdma_is_multicast_addr(struct in6_addr *addr)
> >+{
> >+ return addr->s6_addr[0] == 0xff ? 1 : 0;
> >+}
> >+
> >+static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac)
> >+{
> >+ memset(mac, 0xff, 6);
> >+}
>
> I don't think we want all of these inline, in particular rdmaoe_mac_to_ll,
> rdmaoe_get_mtu , rdmaoe_get_rate.
They're quite simple functions - what would you prefer, exporting them?
Why?