[openib-general] [PATCH] [kdapl] Various AT changes
Hal Rosenstock
halr at voltaire.com
Mon May 16 12:52:36 PDT 2005
Various AT changes including:
Get the API in line with changes on the shahar-ibat branch
Integrate various changes from the shahar-ibat branch that apply to the
"level of functionality" originally implemented (e.g. no Service Record
support)
Fix a couple of minor bugs found by code inspection
Better debug support
Note that this does not fix the slab corruption problem :-(
Signed-off-by: Hal Rosenstock <halr at voltaire.com>
Index: ib_at.h
===================================================================
--- ib_at.h (revision 2331)
+++ ib_at.h (working copy)
@@ -30,25 +30,28 @@
* SOFTWARE.
*
*
- * $Id:$
+ * $Id$
*/
#if !defined( IB_AT_H )
#define IB_AT_H
+#include <ib_verbs.h>
+#include <ib_sa.h>
+
enum ib_at_multipathing_type {
IB_AT_PATH_SAME_PORT = 0,
- IB_AT_PATH_SAME_HCA = 1, /* but different ports if applicable */
- IB_AT_PATH_SAME_SYSTEM = 2, /* but different ports if applicable */
+ IB_AT_PATH_SAME_HCA = 1, /* but different ports if applicable */
+ IB_AT_PATH_SAME_SYSTEM = 2, /* but different ports if applicable */
IB_AT_PATH_INDEPENDENT_HCA = 3,
- IB_AT_PATH_SRC_ROUTE = 4, /* application controlled multipathing */
+ IB_AT_PATH_SRC_ROUTE = 4, /* application controlled multipathing */
};
enum ib_at_route_flags {
- IB_AT_ROUTE_USE_DEFAULTS = 0,
- IB_AT_ROUTE_FORCE_ATS,
- IB_AT_ROUTE_FORCE_ARP,
- IB_AT_ROUTE_FORCE_RESOLVE,
+ IB_AT_ROUTE_USE_DEFAULTS = 0,
+ IB_AT_ROUTE_FORCE_ATS = 1,
+ IB_AT_ROUTE_FORCE_ARP = 2,
+ IB_AT_ROUTE_FORCE_RESOLVE = 4,
};
struct ib_at_path_attr {
@@ -169,7 +172,7 @@
* See ib_at_completion structure documentation for asynchronous
* operation details.
*/
-int ib_at_ips_by_gid(union ib_gid gid, u32 *dst_ips, int nips,
+int ib_at_ips_by_gid(union ib_gid *gid, u32 *dst_ips, int nips,
struct ib_at_completion *async_comp);
/**
@@ -208,7 +211,7 @@
* @req_id: asynchronous request ID ib_at_op_status
*
* Return non-negative ib_at_op_status value,
- * or -EINVAL if the reqest ID is invalid.
+ * or -EINVAL if the request ID is invalid.
*/
int ib_at_status(u64 req_id);
Index: at.c
===================================================================
--- at.c (revision 2331)
+++ at.c (working copy)
@@ -30,7 +30,7 @@
* SOFTWARE.
*
*
- * $Id:$
+ * $Id$
*/
#include <linux/module.h>
@@ -118,7 +118,7 @@
int sa_id;
};
-static struct async pending_reqs; /* dummy head for cyclic list */
+struct async pending_reqs; /* dummy head for cyclic list */
struct ib_at_src {
u32 ip;
@@ -158,7 +158,6 @@
static void path_req_complete(int stat, struct ib_sa_path_rec *ret, void *ctx);
static int resolve_path(struct path_req *req);
-
static int resolve_ip(struct ib_at_src *src, u32 dst_ip, u32 src_ip,
int tos, union ib_gid *dgid)
{
@@ -254,7 +253,7 @@
src->dev = priv->ca;
src->port = priv->port;
src->pkey = cpu_to_be16(priv->pkey);
- memcpy(&src->gid, (ipoib_dev->dev_addr + 4), sizeof(src->gid));
+ memcpy(&src->gid, ipoib_dev->dev_addr + 4, sizeof(src->gid));
if (!dgid)
return 0;
@@ -264,7 +263,7 @@
* the IB device which was found.
*/
if (rt->u.dst.neighbour->dev->flags & IFF_LOOPBACK) {
- memcpy(dgid, (ipoib_dev->dev_addr + 4),
+ memcpy(dgid, ipoib_dev->dev_addr + 4,
sizeof(union ib_gid));
return 1;
@@ -272,7 +271,7 @@
if ((NUD_CONNECTED|NUD_DELAY|NUD_PROBE) &
rt->u.dst.neighbour->nud_state) {
- memcpy(dgid, (rt->u.dst.neighbour->ha + 4),
+ memcpy(dgid, rt->u.dst.neighbour->ha + 4,
sizeof(union ib_gid));
return 1;
@@ -285,9 +284,17 @@
static u64 alloc_req_id(void)
{
- static u64 req_id = 1;
+ static u64 req_id = 0;
+ u64 new_id;
+ unsigned long flags;
- return ++req_id;
+ spin_lock_irqsave(&pending_reqs.lock, flags);
+ new_id = ++req_id;
+ if (!new_id)
+ new_id = ++req_id;
+ spin_unlock_irqrestore(&pending_reqs.lock, flags);
+
+ return new_id;
}
static void req_init(struct async *pend, void *data, int nelem, int type,
@@ -361,7 +368,7 @@
{
struct async *pend = v;
- DEBUG("complete req %p\n", pend);
+ DEBUG("complete pend %p", pend);
pend->comp.fn(pend->comp.req_id, pend->comp.context, pend->nelem);
@@ -373,20 +380,23 @@
struct async **rr, *waiting;
unsigned long flags = 0;
- DEBUG("pend %p nrec %d", pend, nrec);
+ DEBUG("pend %p nrec %d async %p", pend, nrec, q);
if (pend->status != IB_AT_STATUS_PENDING)
- WARN("pend %p already completed??", pend);
+ WARN("pend %p already completed? status %d", pend, pend->status);
pend->status = nrec < 0 ? IB_AT_STATUS_ERROR : IB_AT_STATUS_COMPLETED;
- if (pend->sa_query)
+ if (pend->sa_query) {
ib_sa_cancel_query(pend->sa_id, pend->sa_query);
+ pend->sa_query = NULL;
+ }
if (q)
spin_lock_irqsave(&q->lock, flags);
if (pend->parent) {
+ DEBUG("pend->parent %p", pend->parent);
for (rr = &pend->parent->waiting; *rr; rr = &(*rr)->waiting)
if (*rr == pend) {
*rr = (*rr)->waiting;
@@ -476,11 +486,13 @@
unsigned long flags;
struct async *a;
- DEBUG("lookup in q %p req %p", q, new);
+ DEBUG("lookup in q %p pending %p", q, new);
spin_lock_irqsave(&q->lock, flags);
- for (a = q->next; a != q; a = a->next)
+ for (a = q->next; a != q; a = a->next) {
+ DEBUG("%d %d", a->type, type);
if (a->type == type && same_fn(a, new))
break;
+ }
spin_unlock_irqrestore(&q->lock, flags);
return a == q ? NULL : a;
@@ -574,13 +586,14 @@
DEBUG("req %p", req);
if (req->pend.parent) {
- WARN("path_req_complete for child req %p???", req);
+ WARN("for child req %p???", req);
return;
}
if (status) {
- DEBUG("timed out - check if should retry");
- if (jiffies - req->pend.start < IB_AT_REQ_TIMEOUT)
+ DEBUG("status %d - check if should retry", status);
+ if (status == -ETIMEDOUT &&
+ jiffies - req->pend.start < IB_AT_REQ_TIMEOUT)
resolve_path(req);
else
req_end(&req->pend, -ETIMEDOUT, &pending_reqs);
@@ -605,6 +618,7 @@
{
struct async *pend, *next;
struct route_req *req;
+ struct path_req *preq;
unsigned long flags;
DEBUG("start sweeping");
@@ -613,18 +627,36 @@
for (pend = pending_reqs.next; pend != &pending_reqs; pend = next) {
next = pend->next;
- req = container_of(pend, struct route_req, pend);
+ switch (pend->type) {
+ case IBAT_REQ_ARP:
+ case IBAT_REQ_ATS:
+ req = container_of(pend, struct route_req, pend);
- DEBUG("examining route req %p pend %p", req, pend);
- if (jiffies > (pend->start + IB_AT_REQ_TIMEOUT)) {
- DEBUG("req delete <%d.%d.%d.%d> <%lu:%lu>",
- (req->dst_ip & 0x000000ff),
- (req->dst_ip & 0x0000ff00) >> 8,
- (req->dst_ip & 0x00ff0000) >> 16,
- (req->dst_ip & 0xff000000) >> 24,
- jiffies, pend->start);
+ DEBUG("examining route req %p pend %p", req, pend);
+ if (jiffies > pend->start + IB_AT_REQ_TIMEOUT) {
+ DEBUG("req delete <%d.%d.%d.%d> <%lu:%lu>",
+ (req->dst_ip & 0x000000ff),
+ (req->dst_ip & 0x0000ff00) >> 8,
+ (req->dst_ip & 0x00ff0000) >> 16,
+ (req->dst_ip & 0xff000000) >> 24,
+ jiffies, pend->start);
- req_end(pend, -ETIMEDOUT, NULL);
+ req_end(pend, -ETIMEDOUT, NULL);
+ }
+ break;
+ case IBAT_REQ_PATHREC:
+ preq = container_of(pend, struct path_req, pend);
+
+ DEBUG("examining path req %p pend %p", preq, pend);
+ if (jiffies > pend->start + IB_AT_REQ_TIMEOUT) {
+ DEBUG("req delete path <%lu:%lu>",
+ jiffies, pend->start);
+
+ req_end(pend, -ETIMEDOUT, NULL);
+ }
+ break;
+ default:
+ WARN("unknown async req type %d", pend->type);
}
}
@@ -651,7 +683,7 @@
if (req->pend.type == IBAT_REQ_ATS) {
WARN("ATS - not yet");
- return 0;
+ return -1; /* 0 when supported */
}
WARN("bad req %p type %d", req, req->pend.type);
@@ -666,32 +698,31 @@
.dgid = req->rt.dgid,
.sgid = req->rt.sgid,
};
- int r;
if (req->pend.type != IBAT_REQ_PATHREC) {
WARN("bad req %p type %d", req, req->pend.type);
return -1;
}
- r = ib_sa_path_rec_get(req->rt.out_dev,
- req->rt.out_port,
- &rec,
- (IB_SA_PATH_REC_DGID |
- IB_SA_PATH_REC_SGID |
- IB_SA_PATH_REC_PKEY |
- IB_SA_PATH_REC_NUMB_PATH),
- req->pend.timeout_ms,
- GFP_KERNEL,
- path_req_complete,
- req,
- &req->pend.sa_query);
+ req->pend.sa_id = ib_sa_path_rec_get(req->rt.out_dev,
+ req->rt.out_port,
+ &rec,
+ (IB_SA_PATH_REC_DGID |
+ IB_SA_PATH_REC_SGID |
+ IB_SA_PATH_REC_PKEY |
+ IB_SA_PATH_REC_NUMB_PATH),
+ req->pend.timeout_ms,
+ GFP_KERNEL,
+ path_req_complete,
+ req,
+ &req->pend.sa_query);
- if (r < 0)
- return r;
+ if (req->pend.sa_id < 0) {
+ WARN("ib_sa_path_rec_get %d", req->pend.sa_id);
+ return req->pend.sa_id;
+ }
req->pend.timeout_ms <<= 1; /* exponential backoff */
- req->pend.sa_id = r;
-
return 0;
}
@@ -716,10 +747,12 @@
spin_lock_irqsave(&q->lock, flags);
for (a = q->next; a != q; a = a->next) {
+ DEBUG("a %p", a);
if (a->type != IBAT_REQ_ARP)
continue;
req = container_of(a, struct route_req, pend);
+ DEBUG("req %p", req);
if (arp->op == __constant_htons(ARPOP_REPLY)) {
if (arp->dst_ip == req->dst_ip)
@@ -751,7 +784,6 @@
* queue IB arp packet onto work queue.
*/
DEBUG("recv IB ARP - queue work");
-
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
goto done;
@@ -763,7 +795,6 @@
done:
kfree_skb(skb);
-
return 0;
}
@@ -796,17 +827,20 @@
r = resolve_ip(&rreq->src, dst_ip, src_ip, tos, &rreq->dgid);
if (r < 0) {
+ DEBUG("resolve_ip r < 0 free req %p", rreq);
kmem_cache_free(route_req_cache, rreq);
return r;
}
if (r > 0) {
route_req_output(rreq, ib_route);
+ DEBUG("resolve_ip r > 0 free req %p", rreq);
kmem_cache_free(route_req_cache, rreq);
return 1;
}
if (!async_comp) {
+ DEBUG("!async_comp free req %p", rreq);
kmem_cache_free(route_req_cache, rreq);
return -EWOULDBLOCK;
}
@@ -855,6 +889,7 @@
*/
if (!async_comp) {
+ DEBUG("!async_comp free req %p", preq);
kmem_cache_free(path_req_cache, preq);
return -EWOULDBLOCK;
}
@@ -871,7 +906,7 @@
}
EXPORT_SYMBOL(ib_at_paths_by_route);
-int ib_at_ips_by_gid(union ib_gid gid, u32 *dst_ips, int nips,
+int ib_at_ips_by_gid(union ib_gid *gid, u32 *dst_ips, int nips,
struct ib_at_completion *async_comp)
{
return -1; /* FIXME: not implemented yet */
@@ -910,7 +945,7 @@
a->next->prev = child;
a->next = child;
- a->waiting = NULL; /* clear to avoid cancelling childs */
+ a->waiting = NULL; /* clear to avoid cancelling children */
}
req_end(a, -EINTR, NULL);
@@ -950,7 +985,16 @@
DEBUG("IB AT services init");
- route_req_cache = kmem_cache_create("ib_at_route_reqs",
+ /*
+ * init pending lists' dummies.
+ */
+ pending_reqs.next = pending_reqs.prev = &pending_reqs;
+ spin_lock_init(&pending_reqs.lock);
+
+ /*
+ * Init memory pools
+ */
+ route_req_cache = kmem_cache_create("ib_at_routes",
sizeof(struct route_req),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
@@ -960,7 +1004,7 @@
goto err_route;
}
- path_req_cache = kmem_cache_create("ib_at_path_reqs",
+ path_req_cache = kmem_cache_create("ib_at_paths",
sizeof(struct path_req),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
@@ -970,6 +1014,9 @@
goto err_path;
}
+ /*
+ * Init ib at worker thread and queue
+ */
ib_at_wq = create_workqueue("ib_at_wq");
if (!ib_at_wq) {
WARN("Failed to allocate IB AT wait queue.");
@@ -979,6 +1026,7 @@
INIT_WORK(&ib_at_timer, ib_at_sweep, NULL);
queue_delayed_work(ib_at_wq, &ib_at_timer, IB_AT_SWEEP_INTERVAL);
+
/*
* install device for receiving ARP packets in parallel to the normal
* Linux ARP, this will be the SDP notifier that an ARP request has
More information about the general
mailing list