[openib-general] [PATCH] Convert IPoIB to use new SA module
Roland Dreier
roland at topspin.com
Wed Oct 27 22:20:00 PDT 2004
This converts IPoIB to use the new SA API for PathRecord and
MCMemberRecord transactions.
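For context, the new ib_sa interface takes a caller-filled record plus a component mask saying which fields are significant, and completes asynchronously through a callback. Below is a minimal sketch of a path record query in that style, modeled on the ipoib_main.c hunks further down; my_path_comp() and my_path_query() are illustrative names only, and the return-value convention (negative errno on failure, otherwise a query ID usable with ib_sa_cancel_query()) is inferred from how the multicast hunks use ib_sa_mcmember_rec_set():

#include <linux/slab.h>		/* GFP_ATOMIC */

#include <ib_verbs.h>
#include <ib_sa.h>

static void my_path_comp(int status, struct ib_sa_path_rec *resp, void *context)
{
	/* Called when the SA answers or the query times out; on success
	 * resp holds the returned PathRecord. */
}

static int my_path_query(struct ib_device *ca, u8 port,
			 union ib_gid *sgid, union ib_gid *dgid, u16 pkey,
			 struct ib_sa_query **query)
{
	struct ib_sa_path_rec rec = {
		.numb_path = 1
	};
	int id;

	rec.sgid = *sgid;
	rec.dgid = *dgid;
	rec.pkey = cpu_to_be16(pkey);

	/* The component mask tells the SA which fields of rec to match on. */
	id = ib_sa_path_rec_get(ca, port, &rec,
				IB_SA_PATH_REC_DGID |
				IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_NUMB_PATH |
				IB_SA_PATH_REC_PKEY,
				1000, GFP_ATOMIC,	/* timeout (ms), allocation flags */
				my_path_comp, NULL, query);
	if (id < 0)
		return id;

	/* id plus *query identify the outstanding request and can be handed
	 * to ib_sa_cancel_query(id, *query) if it must be aborted. */
	return 0;
}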
The component mask used for multicast joins after the initial broadcast group join still needs to be corrected...
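The multicast joins follow the same pattern; here is a sketch of a full-member join as ipoib_mcast_join() below issues it, passing only the four component-mask bits the patch currently sets (my_mcast_join() is an illustrative wrapper, not part of the patch):

static int my_mcast_join(struct ib_device *ca, u8 port,
			 union ib_gid *mgid, union ib_gid *port_gid, u16 pkey,
			 void (*done)(int status,
				      struct ib_sa_mcmember_rec *rec,
				      void *context),
			 void *context, struct ib_sa_query **query)
{
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1		/* full member */
	};

	rec.mgid     = *mgid;
	rec.port_gid = *port_gid;
	rec.pkey     = cpu_to_be16(pkey);	/* the patch spells this
						 * be16_to_cpu(); the swap is
						 * identical either way */

	/* Only MGID, PortGID, P_Key and JoinState are masked in for now;
	 * joins to groups other than the broadcast group will eventually
	 * need more fields here -- that is the open item noted above.
	 * Returns a query ID (>= 0) or a negative errno. */
	return ib_sa_mcmember_rec_set(ca, port, &rec,
				      IB_SA_MCMEMBER_REC_MGID |
				      IB_SA_MCMEMBER_REC_PORT_GID |
				      IB_SA_MCMEMBER_REC_PKEY |
				      IB_SA_MCMEMBER_REC_JOIN_STATE,
				      1000, GFP_ATOMIC,
				      done, context, query);
}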
- R.
Index: ulp/ipoib/ipoib_main.c
===================================================================
--- ulp/ipoib/ipoib_main.c (revision 1085)
+++ ulp/ipoib/ipoib_main.c (working copy)
@@ -232,22 +232,24 @@
return 0;
}
-static int path_rec_completion(tTS_IB_CLIENT_QUERY_TID tid,
- int status,
- struct ib_path_record *pathrec,
- int remaining, void *path_ptr)
+static void path_rec_completion(int status,
+ struct ib_sa_path_rec *pathrec,
+ void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct ipoib_dev_priv *priv = netdev_priv(path->dev);
struct sk_buff *skb;
struct ib_ah *ah;
- if (status)
+ ipoib_dbg(priv, "status %d, LID 0x%04x for GID " IPOIB_GID_FMT "\n",
+ status, be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
+
+ if (status != IB_WC_SUCCESS)
goto err;
{
struct ib_ah_attr av = {
- .dlid = pathrec->dlid,
+ .dlid = be16_to_cpu(pathrec->dlid),
.sl = pathrec->sl,
.src_path_bits = 0,
.static_rate = 0,
@@ -273,7 +275,7 @@
"to requeue packet\n");
}
- return 1;
+ return;
err:
while ((skb = __skb_dequeue(&path->queue)))
@@ -283,15 +285,16 @@
IPOIB_PATH(path->neighbour) = NULL;
kfree(path);
-
- return 1;
}
static int path_rec_start(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path = kmalloc(sizeof *path, GFP_ATOMIC);
- tTS_IB_CLIENT_QUERY_TID tid;
+ struct ib_sa_path_rec rec = {
+ .numb_path = 1
+ };
+ struct ib_sa_query *query;
if (!path)
goto err;
@@ -303,17 +306,23 @@
__skb_queue_tail(&path->queue, skb);
path->neighbour = NULL;
+ rec.sgid = priv->local_gid;
+ memcpy(rec.dgid.raw, skb->dst->neighbour->ha + 4, 16);
+ rec.pkey = cpu_to_be16(priv->pkey);
+
/*
* XXX there's a race here if path record completion runs
* before we get to finish up. Add a lock to path struct?
*/
- if (tsIbPathRecordRequest(priv->ca, priv->port,
- priv->local_gid.raw,
- skb->dst->neighbour->ha + 4,
- priv->pkey, 0, HZ, 0,
- path_rec_completion,
- path, &tid)) {
- ipoib_warn(priv, "tsIbPathRecordRequest failed\n");
+ if (ib_sa_path_rec_get(priv->ca, priv->port, &rec,
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_PKEY,
+ 1000, GFP_ATOMIC,
+ path_rec_completion,
+ path, &query) < 0) {
+ ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
goto err;
}
@@ -329,21 +338,23 @@
return 0;
}
-static int unicast_arp_completion(tTS_IB_CLIENT_QUERY_TID tid,
- int status,
- struct ib_path_record *pathrec,
- int remaining, void *skb_ptr)
+static void unicast_arp_completion(int status,
+ struct ib_sa_path_rec *pathrec,
+ void *skb_ptr)
{
struct sk_buff *skb = skb_ptr;
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
struct ib_ah *ah;
+ ipoib_dbg(priv, "status %d, LID 0x%04x for GID " IPOIB_GID_FMT "\n",
+ status, be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
+
if (status)
goto err;
{
struct ib_ah_attr av = {
- .dlid = pathrec->dlid,
+ .dlid = be16_to_cpu(pathrec->dlid),
.sl = pathrec->sl,
.src_path_bits = 0,
.static_rate = 0,
@@ -363,12 +374,10 @@
ipoib_warn(priv, "dev_queue_xmit failed "
"to requeue ARP packet\n");
- return 1;
+ return;
err:
dev_kfree_skb(skb);
-
- return 1;
}
static void unicast_arp_finish(struct sk_buff *skb)
@@ -394,7 +403,10 @@
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *tmp_skb;
- tTS_IB_CLIENT_QUERY_TID tid;
+ struct ib_sa_path_rec rec = {
+ .numb_path = 1
+ };
+ struct ib_sa_query *query;
if (skb->destructor) {
tmp_skb = skb;
@@ -410,18 +422,24 @@
skb->destructor = unicast_arp_finish;
memset(skb->cb, 0, sizeof skb->cb);
+ rec.sgid = priv->local_gid;
+ memcpy(rec.dgid.raw, phdr->hwaddr + 4, 16);
+ rec.pkey = cpu_to_be16(priv->pkey);
+
/*
* XXX We need to keep a record of the skb and TID somewhere
* so that we can cancel the request if the device goes down
* before it finishes.
*/
- if (tsIbPathRecordRequest(priv->ca, priv->port,
- priv->local_gid.raw,
- phdr->hwaddr + 4,
- priv->pkey, 0, HZ, 0,
- unicast_arp_completion,
- skb, &tid)) {
- ipoib_warn(priv, "tsIbPathRecordRequest failed\n");
+ if (ib_sa_path_rec_get(priv->ca, priv->port, &rec,
+ IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_NUMB_PATH |
+ IB_SA_PATH_REC_PKEY,
+ 1000, GFP_ATOMIC,
+ unicast_arp_completion,
+ skb, &query) < 0) {
+ ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
++priv->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
@@ -736,6 +754,15 @@
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
+ result = ib_query_gid(hca, port, 0, &priv->local_gid);
+ if (result) {
+ printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
+ hca->name, port, result);
+ goto alloc_mem_failed;
+ } else
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+
+
result = ipoib_dev_init(priv->dev, hca, port);
if (result < 0) {
printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
Index: ulp/ipoib/ipoib.h
===================================================================
--- ulp/ipoib/ipoib.h (revision 1085)
+++ ulp/ipoib/ipoib.h (working copy)
@@ -39,9 +39,8 @@
#include <ib_verbs.h>
#include <ib_pack.h>
+#include <ib_sa.h>
-#include <ts_ib_sa_client.h>
-
/* constants */
enum {
@@ -102,7 +101,8 @@
struct semaphore mcast_mutex;
- tTS_IB_CLIENT_QUERY_TID mcast_tid;
+ int mcast_query_id;
+ struct ib_sa_query *mcast_query;
struct ipoib_mcast *broadcast;
struct list_head multicast_list;
Index: ulp/ipoib/ipoib_ib.c
===================================================================
--- ulp/ipoib/ipoib_ib.c (revision 1085)
+++ ulp/ipoib/ipoib_ib.c (working copy)
@@ -25,8 +25,6 @@
#include "ipoib.h"
-#include "ts_ib_sa_client.h"
-
static DECLARE_MUTEX(pkey_sem);
static int _ipoib_ib_receive(struct ipoib_dev_priv *priv,
@@ -427,7 +425,7 @@
priv->ca = ca;
priv->port = port;
priv->qp = NULL;
- priv->mcast_tid = TS_IB_CLIENT_QUERY_TID_INVALID;
+ priv->mcast_query = NULL;
if (ipoib_transport_dev_init(dev, ca)) {
printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
Index: ulp/ipoib/ipoib_multicast.c
===================================================================
--- ulp/ipoib/ipoib_multicast.c (revision 1085)
+++ ulp/ipoib/ipoib_multicast.c (working copy)
@@ -30,8 +30,6 @@
#include "ipoib.h"
-#include "ts_ib_sa_client.h"
-
static DECLARE_MUTEX(mcast_mutex);
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
@@ -43,10 +41,12 @@
unsigned long created;
unsigned long backoff;
- struct ib_multicast_member mcast_member;
+ struct ib_sa_mcmember_rec mcmember;
struct ib_ah *address_handle;
- tTS_IB_CLIENT_QUERY_TID tid;
+ int query_id;
+ struct ib_sa_query *query;
+
union ib_gid mgid;
unsigned long flags;
@@ -125,7 +125,7 @@
mcast->address_handle = NULL;
/* Will force a trigger on the first packet we need to send */
- mcast->tid = TS_IB_CLIENT_QUERY_TID_INVALID;
+ mcast->query = NULL;
return mcast;
}
@@ -189,14 +189,13 @@
/* =============================================================== */
/*..ipoib_mcast_join_finish - finish joining mcast group entry */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
- struct ib_multicast_member *member_ptr)
+ struct ib_sa_mcmember_rec *mcmember)
{
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
- mcast->mcast_member = *member_ptr;
- priv->qkey = priv->broadcast->mcast_member.qkey;
+ mcast->mcmember = *mcmember;
if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
@@ -208,9 +207,9 @@
/* Set the cached Q_Key before we attach if it's the broadcast group */
if (!memcmp(mcast->mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid)))
- priv->qkey = priv->broadcast->mcast_member.qkey;
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
- ret = ipoib_mcast_attach(dev, mcast->mcast_member.mlid, &mcast->mgid);
+ ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid), &mcast->mgid);
if (ret < 0) {
ipoib_warn(priv, "couldn't attach QP to multicast group "
IPOIB_GID_FMT "\n",
@@ -222,21 +221,21 @@
{
struct ib_ah_attr av = {
- .dlid = mcast->mcast_member.mlid,
+ .dlid = be16_to_cpu(mcast->mcmember.mlid),
.port_num = priv->port,
- .sl = mcast->mcast_member.sl,
+ .sl = mcast->mcmember.sl,
.src_path_bits = 0,
.static_rate = 0,
.ah_flags = IB_AH_GRH,
.grh = {
- .flow_label = mcast->mcast_member.flowlabel,
- .hop_limit = mcast->mcast_member.hoplmt,
+ .flow_label = be32_to_cpu(mcast->mcmember.flow_label),
+ .hop_limit = mcast->mcmember.hop_limit,
.sgid_index = 0,
- .traffic_class = mcast->mcast_member.tclass
+ .traffic_class = mcast->mcmember.traffic_class
}
};
- memcpy(av.grh.dgid.raw, mcast->mcast_member.mgid,
+ memcpy(av.grh.dgid.raw, mcast->mcmember.mgid.raw,
sizeof (union ib_gid));
mcast->address_handle = ib_create_ah(priv->pd, &av);
@@ -247,8 +246,8 @@
" AV %p, LID 0x%04x, SL %d\n",
IPOIB_GID_ARG(mcast->mgid),
mcast->address_handle,
- mcast->mcast_member.mlid,
- mcast->mcast_member.sl);
+ be16_to_cpu(mcast->mcmember.mlid),
+ mcast->mcmember.sl);
}
}
@@ -268,19 +267,18 @@
/* =============================================================== */
/*..ipoib_mcast_sendonly_join_complete -- handler for multicast join */
static void
-ipoib_mcast_sendonly_join_complete(tTS_IB_CLIENT_QUERY_TID tid,
- int status,
- struct ib_multicast_member *member_ptr,
+ipoib_mcast_sendonly_join_complete(int status,
+ struct ib_sa_mcmember_rec *mcmember,
void *mcast_ptr)
{
struct ipoib_mcast *mcast = mcast_ptr;
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
- mcast->tid = TS_IB_CLIENT_QUERY_TID_INVALID;
+ mcast->query = NULL;
if (!status)
- ipoib_mcast_join_finish(mcast, member_ptr);
+ ipoib_mcast_join_finish(mcast, mcmember);
else {
if (mcast->logcount++ < 20)
ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
@@ -311,7 +309,14 @@
{
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
- tTS_IB_CLIENT_QUERY_TID tid;
+ struct ib_sa_mcmember_rec rec = {
+#if 0 /* Some SMs don't support send-only yet */
+ .join_state = 4
+#else
+ .join_state = 1
+#endif
+ };
+ struct ib_sa_query *query;
int ret = 0;
atomic_inc(&priv->mcast_joins);
@@ -329,16 +334,20 @@
}
ipoib_mcast_get(mcast);
- ret = tsIbMulticastGroupJoin(priv->ca,
- priv->port, mcast->mgid.raw, priv->pkey,
-/* ib_sm doesn't support send only yet
- TS_IB_MULTICAST_JOIN_SEND_ONLY_NON_MEMBER,
-*/
- TS_IB_MULTICAST_JOIN_FULL_MEMBER,
- HZ,
+
+ rec.mgid = mcast->mgid;
+ rec.port_gid = priv->local_gid;
+ rec.pkey = be16_to_cpu(priv->pkey);
+
+ ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+ IB_SA_MCMEMBER_REC_MGID |
+ IB_SA_MCMEMBER_REC_PORT_GID |
+ IB_SA_MCMEMBER_REC_PKEY |
+ IB_SA_MCMEMBER_REC_JOIN_STATE,
+ 1000, GFP_ATOMIC,
ipoib_mcast_sendonly_join_complete,
- mcast, &tid);
- if (ret) {
+ mcast, &query);
+ if (ret < 0) {
ipoib_warn(priv, "tsIbMulticastGroupJoin failed (ret = %d)\n",
ret);
ipoib_mcast_put(mcast);
@@ -347,7 +356,8 @@
", starting join\n",
IPOIB_GID_ARG(mcast->mgid));
- mcast->tid = tid;
+ mcast->query = query;
+ mcast->query_id = ret;
}
out:
@@ -359,18 +369,17 @@
/* =============================================================== */
/*..ipoib_mcast_join_complete - handle comp of mcast join */
-static void ipoib_mcast_join_complete(tTS_IB_CLIENT_QUERY_TID tid,
- int status,
- struct ib_multicast_member *member_ptr,
+static void ipoib_mcast_join_complete(int status,
+ struct ib_sa_mcmember_rec *mcmember,
void *mcast_ptr)
{
struct ipoib_mcast *mcast = mcast_ptr;
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
- priv->mcast_tid = TS_IB_CLIENT_QUERY_TID_INVALID;
+ priv->mcast_query = NULL;
- if (!status && !ipoib_mcast_join_finish(mcast, member_ptr)) {
+ if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
mcast->backoff = HZ;
down(&mcast_mutex);
if (!test_bit(IPOIB_MCAST_STOP, &priv->flags))
@@ -410,24 +419,30 @@
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- int status;
+ struct ib_sa_mcmember_rec rec = {
+ .join_state = 1
+ };
+ int ret = 0;
ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
IPOIB_GID_ARG(mcast->mgid));
- status = tsIbMulticastGroupJoin(priv->ca,
- priv->port,
- mcast->mgid.raw,
- priv->pkey,
- TS_IB_MULTICAST_JOIN_FULL_MEMBER,
- mcast->backoff,
- ipoib_mcast_join_complete,
- mcast, &priv->mcast_tid);
+ rec.mgid = mcast->mgid;
+ rec.port_gid = priv->local_gid;
+ rec.pkey = be16_to_cpu(priv->pkey);
- if (status) {
- ipoib_warn(priv, "tsIbMulticastGroupJoin failed, status %d\n",
- status);
+ ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+ IB_SA_MCMEMBER_REC_MGID |
+ IB_SA_MCMEMBER_REC_PORT_GID |
+ IB_SA_MCMEMBER_REC_PKEY |
+ IB_SA_MCMEMBER_REC_JOIN_STATE,
+ mcast->backoff * 1000, GFP_ATOMIC,
+ ipoib_mcast_join_complete,
+ mcast, &priv->mcast_query);
+ if (ret < 0) {
+ ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret);
+
mcast->backoff *= 2;
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
@@ -438,7 +453,8 @@
&priv->mcast_task,
mcast->backoff);
up(&mcast_mutex);
- }
+ } else
+ priv->mcast_query_id = ret;
}
/* =============================================================== */
@@ -456,6 +472,11 @@
}
up(&mcast_mutex);
+ if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
+ ipoib_warn(priv, "ib_gid_entry_get() failed\n");
+ else
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+
if (!priv->broadcast) {
priv->broadcast = ipoib_mcast_alloc(dev, 1);
if (!priv->broadcast) {
@@ -513,12 +534,7 @@
priv->local_lid = port_lid.lid;
}
- if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
- ipoib_warn(priv, "ib_gid_entry_get() failed\n");
- else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
-
- priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcast_member.mtu)
+ priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)
- IPOIB_ENCAP_LEN;
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
@@ -554,9 +570,9 @@
down(&mcast_mutex);
- if (priv->mcast_tid != TS_IB_CLIENT_QUERY_TID_INVALID) {
- ib_client_query_cancel(priv->mcast_tid);
- priv->mcast_tid = TS_IB_CLIENT_QUERY_TID_INVALID;
+ if (priv->mcast_query) {
+ ib_sa_cancel_query(priv->mcast_query_id, priv->mcast_query);
+ priv->mcast_query = NULL;
}
set_bit(IPOIB_MCAST_STOP, &priv->flags);
@@ -580,14 +596,11 @@
return 0;
/* Remove ourselves from the multicast group */
- result = ipoib_mcast_detach(dev, mcast->mcast_member.mlid, &mcast->mgid);
+ result = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid), &mcast->mgid);
if (result)
ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", result);
- result = tsIbMulticastGroupLeave(priv->ca, priv->port,
- mcast->mcast_member.mgid);
- if (result)
- ipoib_warn(priv, "tsIbMulticastGroupLeave failed (result = %d)\n", result);
+ /* XXX implement leaving SA's multicast group */
return 0;
}
@@ -648,7 +661,7 @@
}
if (!mcast->address_handle) {
- if (mcast->tid != TS_IB_CLIENT_QUERY_TID_INVALID)
+ if (mcast->query)
ipoib_dbg_mcast(priv, "no address vector, "
"but multicast join already started\n");
else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
@@ -728,8 +741,8 @@
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
list_del_init(&mcast->list);
- if (mcast->tid != TS_IB_CLIENT_QUERY_TID_INVALID)
- ib_client_query_cancel(mcast->tid);
+ if (mcast->query)
+ ib_sa_cancel_query(mcast->query_id, mcast->query);
ipoib_mcast_leave(dev, mcast);
ipoib_mcast_put(mcast);
}
@@ -845,8 +858,8 @@
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
list_del_init(&mcast->list);
- if (mcast->tid != TS_IB_CLIENT_QUERY_TID_INVALID)
- ib_client_query_cancel(mcast->tid);
+ if (mcast->query)
+ ib_sa_cancel_query(mcast->query_id, mcast->query);
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_put(mcast);
}