[openib-general] [PATCH v2 1/2] sa_query: add generic query interfaces capable of supporting RMPP
Sean Hefty
sean.hefty at intel.com
Thu Aug 3 10:37:11 PDT 2006
The following patch adds a generic interface to send MADs to the SA.
The primary motivation of adding these calls is to expand the SA query
interface to include RMPP responses for users wanting more than a
single attribute returned from a query (e.g. multipath record queries).
The implementation of existing SA query routines was layered on top of
the generic query interface.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
Notes from v1:
- "cursor" was renamed to "iter"
- ib_sa_iter_next() returns a pointer to the packed attribute
- The interface turned out to be easier to use with a single function
call, ib_sa_iter_next(), to walk through the attributes.
Index: include/rdma/ib_sa.h
===================================================================
--- include/rdma/ib_sa.h (revision 8647)
+++ include/rdma/ib_sa.h (working copy)
@@ -254,6 +254,71 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
+struct ib_sa_iter;
+
+/**
+ * ib_sa_iter_create - Create an iterator that may be used to walk through
+ * a list of returned SA records.
+ * @mad_recv_wc: A received response from the SA.
+ *
+ * This call allocates an iterator that is used to walk through a list of
+ * SA records. Users must free the iterator by calling ib_sa_iter_free.
+ */
+struct ib_sa_iter *ib_sa_iter_create(struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_sa_iter_free - Release an iterator.
+ * @iter: The iterator to free.
+ */
+void ib_sa_iter_free(struct ib_sa_iter *iter);
+
+/**
+ * ib_sa_iter_next - Move an iterator to reference the next attribute and
+ * return the attribute.
+ * @iter: The iterator to move.
+ *
+ * The referenced attribute will be in wire format. The function returns NULL
+ * if there are no more attributes to return.
+ */
+void *ib_sa_iter_next(struct ib_sa_iter *iter);
+
+/**
+ * ib_sa_send_mad - Send a MAD to the SA.
+ * @device:device to send query on
+ * @port_num: port number to send query on
+ * @method:MAD method to use in the send.
+ * @attr:Reference to attribute to send in MAD.
+ * @attr_id:Attribute type identifier.
+ * @comp_mask:component mask to send in MAD
+ * @timeout_ms:time to wait for response, if one is expected
+ * @retries:number of times to retry request
+ * @gfp_mask:GFP mask to use for internal allocations
+ * @callback:function called when query completes, times out or is
+ * canceled
+ * @context:opaque user context passed to callback
+ * @sa_query:query context, used to cancel query
+ *
+ * Send a message to the SA. If a response is expected (timeout_ms is
+ * non-zero), the callback function will be called when the query completes.
+ * Status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
+ * occurred sending the query. Mad_recv_wc will reference any returned
+ * response from the SA. It is the responsibility of the caller to free
+ * mad_recv_wc by calling ib_free_recv_mad() if it is non-NULL.
+ *
+ * If the return value of ib_sa_send_mad() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_send_mad(struct ib_device *device, u8 port_num,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask,
+ int timeout_ms, int retries, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context),
+ void *context, struct ib_sa_query **query);
+
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
Index: core/sa_query.c
===================================================================
--- core/sa_query.c (revision 8647)
+++ core/sa_query.c (working copy)
@@ -72,30 +72,42 @@ struct ib_sa_device {
};
struct ib_sa_query {
- void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
- void (*release)(struct ib_sa_query *);
+ void (*callback)(int, struct ib_mad_recv_wc *, void *);
struct ib_sa_port *port;
struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
+ void *context;
int id;
};
struct ib_sa_service_query {
void (*callback)(int, struct ib_sa_service_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
};
struct ib_sa_path_query {
void (*callback)(int, struct ib_sa_path_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
};
struct ib_sa_mcmember_query {
void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
+};
+
+struct ib_sa_iter {
+ struct ib_mad_recv_wc *recv_wc;
+ struct ib_mad_recv_buf *recv_buf;
+ int attr_id;
+ int attr_size;
+ int attr_offset;
+ int data_offset;
+ int data_left;
+ void *attr;
+ u8 attr_data[0];
};
static void ib_sa_add_one(struct ib_device *device);
@@ -504,9 +516,17 @@ EXPORT_SYMBOL(ib_init_ah_from_mcmember);
int ib_sa_pack_attr(void *dst, void *src, int attr_id)
{
switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ src, dst);
+ break;
case IB_SA_ATTR_PATH_REC:
ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), src, dst);
break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
+ src, dst);
+ break;
default:
return -EINVAL;
}
@@ -517,9 +537,17 @@ EXPORT_SYMBOL(ib_sa_pack_attr);
int ib_sa_unpack_attr(void *dst, void *src, int attr_id)
{
switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ src, dst);
+ break;
case IB_SA_ATTR_PATH_REC:
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), src, dst);
break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
+ src, dst);
+ break;
default:
return -EINVAL;
}
@@ -527,15 +555,20 @@ int ib_sa_unpack_attr(void *dst, void *s
}
EXPORT_SYMBOL(ib_sa_unpack_attr);
-static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
+static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask)
{
unsigned long flags;
- memset(mad, 0, sizeof *mad);
-
mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
+ mad->mad_hdr.method = method;
+ mad->mad_hdr.attr_id = cpu_to_be16(attr_id);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_sa_pack_attr(mad->data, attr, attr_id);
spin_lock_irqsave(&tid_lock, flags);
mad->mad_hdr.tid =
@@ -589,26 +622,175 @@ retry:
return ret ? ret : id;
}
-static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+/* Return size of SA attributes on the wire. */
+static int sa_mad_attr_size(int attr_id)
+{
+ int size;
+
+ switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ size = 176;
+ break;
+ case IB_SA_ATTR_PATH_REC:
+ size = 64;
+ break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ size = 52;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+ return size;
+}
+
+struct ib_sa_iter *ib_sa_iter_create(struct ib_mad_recv_wc *mad_recv_wc)
{
- struct ib_sa_path_query *query =
- container_of(sa_query, struct ib_sa_path_query, sa_query);
+ struct ib_sa_iter *iter;
+ struct ib_sa_mad *mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ int attr_id, attr_size, attr_offset;
+
+ attr_id = be16_to_cpu(mad->mad_hdr.attr_id);
+ attr_offset = be16_to_cpu(mad->sa_hdr.attr_offset) * 8;
+ attr_size = sa_mad_attr_size(attr_id);
+ if (!attr_size || attr_offset < attr_size)
+ return ERR_PTR(-EINVAL);
+
+ iter = kzalloc(sizeof *iter + attr_size, GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+
+ iter->data_left = mad_recv_wc->mad_len - IB_MGMT_SA_HDR;
+ iter->recv_wc = mad_recv_wc;
+ iter->recv_buf = &mad_recv_wc->recv_buf;
+ iter->attr_id = attr_id;
+ iter->attr_offset = attr_offset;
+ iter->attr_size = attr_size;
+ return iter;
+}
+EXPORT_SYMBOL(ib_sa_iter_create);
+
+void ib_sa_iter_free(struct ib_sa_iter *iter)
+{
+ kfree(iter);
+}
+EXPORT_SYMBOL(ib_sa_iter_free);
+
+void *ib_sa_iter_next(struct ib_sa_iter *iter)
+{
+ struct ib_sa_mad *mad;
+ int left, offset = 0;
+
+ while (iter->data_left >= iter->attr_offset) {
+ while (iter->data_offset < IB_MGMT_SA_DATA) {
+ mad = (struct ib_sa_mad *) iter->recv_buf->mad;
+
+ left = IB_MGMT_SA_DATA - iter->data_offset;
+ if (left < iter->attr_size) {
+ /* copy first piece of the attribute */
+ iter->attr = &iter->attr_data;
+ memcpy(iter->attr,
+ &mad->data[iter->data_offset], left);
+ offset = left;
+ break;
+ } else if (offset) {
+ /* copy the second piece of the attribute */
+ memcpy(iter->attr + offset, &mad->data[0],
+ iter->attr_size - offset);
+ iter->data_offset = iter->attr_size - offset;
+ offset = 0;
+ } else {
+ iter->attr = &mad->data[iter->data_offset];
+ iter->data_offset += iter->attr_size;
+ }
+
+ iter->data_left -= iter->attr_offset;
+ goto out;
+ }
+ iter->data_offset = 0;
+ iter->recv_buf = list_entry(iter->recv_buf->list.next,
+ struct ib_mad_recv_buf, list);
+ }
+ iter->attr = NULL;
+out:
+ return iter->attr;
+}
+EXPORT_SYMBOL(ib_sa_iter_next);
+
+int ib_sa_send_mad(struct ib_device *device, u8 port_num,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask,
+ int timeout_ms, int retries, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context),
+ void *context, struct ib_sa_query **query)
+{
+ struct ib_sa_query *sa_query;
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_port *port;
+ struct ib_mad_agent *agent;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ sa_query = kmalloc(sizeof *sa_query, gfp_mask);
+ if (!sa_query)
+ return -ENOMEM;
+
+ sa_query->mad_buf = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
+ IB_MGMT_SA_DATA, gfp_mask);
+ if (!sa_query->mad_buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
- if (mad) {
- struct ib_sa_path_rec rec;
+ sa_query->port = port;
+ sa_query->callback = callback;
+ sa_query->context = context;
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
+ init_mad(sa_query->mad_buf->mad, agent, method, attr, attr_id,
+ comp_mask);
+
+ ret = send_mad(sa_query, timeout_ms, retries, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ *query = sa_query;
+ return ret;
+
+err2:
+ ib_free_send_mad(sa_query->mad_buf);
+err1:
+ kfree(sa_query);
+ return ret;
}
-static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
+static void ib_sa_path_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
{
- kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
+ struct ib_sa_path_query *query = context;
+
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_path_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
/**
@@ -647,83 +829,47 @@ int ib_sa_path_rec_get(struct ib_device
struct ib_sa_query **sa_query)
{
struct ib_sa_path_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
- query->sa_query.release = ib_sa_path_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = IB_MGMT_METHOD_GET;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, IB_MGMT_METHOD_GET, rec,
+ IB_SA_ATTR_PATH_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_path_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
+ kfree(query);
return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
-
-err1:
- kfree(query);
- return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
-static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+static void ib_sa_service_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
{
- struct ib_sa_service_query *query =
- container_of(sa_query, struct ib_sa_service_query, sa_query);
+ struct ib_sa_service_query *query = context;
- if (mad) {
- struct ib_sa_service_rec rec;
-
- ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_service_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
/**
@@ -764,89 +910,47 @@ int ib_sa_service_rec_query(struct ib_de
struct ib_sa_query **sa_query)
{
struct ib_sa_service_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
- if (method != IB_MGMT_METHOD_GET &&
- method != IB_MGMT_METHOD_SET &&
- method != IB_SA_METHOD_DELETE)
- return -EINVAL;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
- query->sa_query.release = ib_sa_service_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
- rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, method, rec,
+ IB_SA_ATTR_SERVICE_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_service_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
-
- return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
+ kfree(query);
-err1:
- kfree(query);
return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
-static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+static void ib_sa_mcmember_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
{
- struct ib_sa_mcmember_query *query =
- container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
+ struct ib_sa_mcmember_query *query = context;
- if (mad) {
- struct ib_sa_mcmember_rec rec;
-
- ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_mcmember_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ ib_unpack(mcmember_rec_table,
+ ARRAY_SIZE(mcmember_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
@@ -861,61 +965,22 @@ int ib_sa_mcmember_rec_query(struct ib_d
struct ib_sa_query **sa_query)
{
struct ib_sa_mcmember_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
- query->sa_query.release = ib_sa_mcmember_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
- rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, method, rec,
+ IB_SA_ATTR_MC_MEMBER_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_mcmember_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
+ kfree(query);
return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
-
-err1:
- kfree(query);
- return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
@@ -931,13 +996,13 @@ static void send_handler(struct ib_mad_a
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ query->callback(-ETIMEDOUT, NULL, query->context);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ query->callback(-EINTR, NULL, query->context);
break;
default:
- query->callback(query, -EIO, NULL);
+ query->callback(-EIO, NULL, query->context);
break;
}
@@ -947,7 +1012,7 @@ static void send_handler(struct ib_mad_a
ib_free_send_mad(mad_send_wc->send_buf);
kref_put(&query->sm_ah->ref, free_sm_ah);
- query->release(query);
+ kfree(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
@@ -959,17 +1024,11 @@ static void recv_handler(struct ib_mad_a
mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
query = mad_buf->context[0];
- if (query->callback) {
- if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
- else
- query->callback(query, -EIO, NULL);
- }
-
- ib_free_recv_mad(mad_recv_wc);
+ if (query->callback)
+ query->callback(mad_recv_wc->recv_buf.mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc, query->context);
+ else
+ ib_free_recv_mad(mad_recv_wc);
}
static void ib_sa_add_one(struct ib_device *device)
More information about the general
mailing list