[openib-general] [RFC] [PATCH 1/2] sa_query: add generic query interfaces capable of supporting RMPP
Sean Hefty
sean.hefty at intel.com
Tue Aug 1 15:17:21 PDT 2006
The following patch adds a generic interface to send MADs to the SA.
The primary motivation for adding these calls is to expand the SA query
interface to support RMPP responses for users who want more than a
single attribute returned from a query (e.g. multipath record queries).
The design for retrieving attributes from an RMPP response was taken
from that used by the local SA cache. The implementation of existing
SA query routines was layered on top of the generic query interface.
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
---
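As a rough usage sketch (not part of the patch): a consumer of the new
interface could issue a GetTable request and walk the returned records with
the cursor calls declared in ib_sa.h below. IB_SA_METHOD_GET_TABLE is the
standard SA GetTable method constant; the my_* names, timeout, and retry
values are illustrative only.

/* Illustrative only -- not part of this patch. */
static void my_path_handler(int status, struct ib_mad_recv_wc *mad_recv_wc,
			    void *context)
{
	struct ib_sa_attr_cursor *cursor;
	struct ib_sa_path_rec rec;

	if (status || !mad_recv_wc)
		goto out;

	cursor = ib_sa_create_cursor(mad_recv_wc);
	if (IS_ERR(cursor))
		goto out;

	/* Copy each path record out of the (possibly multi-MAD) RMPP
	 * response. */
	while (ib_sa_get_next_attr(cursor, &rec))
		my_consume_path(&rec, context);	/* hypothetical consumer */

	ib_sa_free_cursor(cursor);
out:
	/* The callback owns the receive buffer per the ib_sa_send_mad
	 * documentation. */
	if (mad_recv_wc)
		ib_free_recv_mad(mad_recv_wc);
}

static int my_query_paths(struct ib_device *device, u8 port_num,
			  struct ib_sa_path_rec *rec,
			  ib_sa_comp_mask comp_mask, void *context,
			  struct ib_sa_query **query)
{
	return ib_sa_send_mad(device, port_num, IB_SA_METHOD_GET_TABLE,
			      rec, IB_SA_ATTR_PATH_REC, comp_mask,
			      2000, 3, GFP_KERNEL, my_path_handler,
			      context, query);
}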
Index: include/rdma/ib_sa.h
===================================================================
--- include/rdma/ib_sa.h (revision 8647)
+++ include/rdma/ib_sa.h (working copy)
@@ -254,6 +254,73 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
+struct ib_sa_attr_cursor;
+
+/**
+ * ib_sa_create_cursor - Create a cursor that may be used to walk through
+ * a list of returned SA records.
+ * @mad_recv_wc: A received response from the SA.
+ *
+ * This call allocates a cursor that is used to walk through a list of
+ * SA records. Users must free the cursor by calling ib_sa_free_cursor.
+ */
+struct ib_sa_attr_cursor *ib_sa_create_cursor(struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_sa_free_cursor - Release a cursor.
+ * @cursor: The cursor to free.
+ */
+void ib_sa_free_cursor(struct ib_sa_attr_cursor *cursor);
+
+/**
+ * ib_sa_get_next_attr - Retrieve the next SA attribute referenced by a cursor.
+ * @cursor: A reference to a cursor that points to the next attribute to
+ * retrieve.
+ * @attr: Buffer into which the attribute is copied.
+ *
+ * Returns non-zero if an attribute was returned, and copies the attribute
+ * into the provided buffer. Returns zero if all attributes have been
+ * retrieved from the cursor.
+ */
+int ib_sa_get_next_attr(struct ib_sa_attr_cursor *cursor, void *attr);
+
+/**
+ * ib_sa_send_mad - Send a MAD to the SA.
+ * @device: Device to send the query on.
+ * @port_num: Port number to send the query on.
+ * @method: MAD method to use in the send.
+ * @attr: Reference to the attribute to send in the MAD.
+ * @attr_id: Attribute type identifier.
+ * @comp_mask: Component mask to send in the MAD.
+ * @timeout_ms: Time to wait for a response, if one is expected.
+ * @retries: Number of times to retry the request.
+ * @gfp_mask: GFP mask to use for internal allocations.
+ * @callback: Function called when the query completes, times out, or is
+ *   canceled.
+ * @context: Opaque user context passed to the callback.
+ * @sa_query: Query context, used to cancel the query.
+ *
+ * Send a message to the SA. If a response is expected (timeout_ms is
+ * non-zero), the callback function will be called when the query completes.
+ * Status is 0 for a successful response, -EINTR if the query is canceled,
+ * -ETIMEDOUT if the query timed out, or -EIO if an error occurred while
+ * sending the query. The mad_recv_wc parameter will reference any
+ * response returned from the SA. It is the caller's responsibility to
+ * free mad_recv_wc by calling ib_free_recv_mad() if it is non-NULL.
+ *
+ * If the return value of ib_sa_send_mad() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_send_mad(struct ib_device *device, u8 port_num,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask,
+ int timeout_ms, int retries, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context),
+ void *context, struct ib_sa_query **query);
+
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
Index: core/sa_query.c
===================================================================
--- core/sa_query.c (revision 8647)
+++ core/sa_query.c (working copy)
@@ -72,30 +72,41 @@ struct ib_sa_device {
};
struct ib_sa_query {
- void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
- void (*release)(struct ib_sa_query *);
+ void (*callback)(int, struct ib_mad_recv_wc *, void *);
struct ib_sa_port *port;
struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
+ void *context;
int id;
};
struct ib_sa_service_query {
void (*callback)(int, struct ib_sa_service_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
};
struct ib_sa_path_query {
void (*callback)(int, struct ib_sa_path_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
};
struct ib_sa_mcmember_query {
void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
void *context;
- struct ib_sa_query sa_query;
+ struct ib_sa_query *sa_query;
+};
+
+struct ib_sa_attr_cursor {
+ struct ib_mad_recv_wc *recv_wc;
+ struct ib_mad_recv_buf *recv_buf;
+ int attr_id;
+ int attr_size;
+ int attr_offset;
+ int data_offset;
+ int data_left;
+ u8 attr[0];
};
static void ib_sa_add_one(struct ib_device *device);
@@ -504,9 +515,17 @@ EXPORT_SYMBOL(ib_init_ah_from_mcmember);
int ib_sa_pack_attr(void *dst, void *src, int attr_id)
{
switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ src, dst);
+ break;
case IB_SA_ATTR_PATH_REC:
ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), src, dst);
break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
+ src, dst);
+ break;
default:
return -EINVAL;
}
@@ -517,9 +536,17 @@ EXPORT_SYMBOL(ib_sa_pack_attr);
int ib_sa_unpack_attr(void *dst, void *src, int attr_id)
{
switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ src, dst);
+ break;
case IB_SA_ATTR_PATH_REC:
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), src, dst);
break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
+ src, dst);
+ break;
default:
return -EINVAL;
}
@@ -527,15 +554,20 @@ int ib_sa_unpack_attr(void *dst, void *s
}
EXPORT_SYMBOL(ib_sa_unpack_attr);
-static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
+static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask)
{
unsigned long flags;
- memset(mad, 0, sizeof *mad);
-
mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
+ mad->mad_hdr.method = method;
+ mad->mad_hdr.attr_id = cpu_to_be16(attr_id);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_sa_pack_attr(mad->data, attr, attr_id);
spin_lock_irqsave(&tid_lock, flags);
mad->mad_hdr.tid =
@@ -589,26 +621,175 @@ retry:
return ret ? ret : id;
}
-static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+/* Return the size of an SA attribute on the wire. */
+static int sa_mad_attr_size(int attr_id)
+{
+ int size;
+
+ switch (attr_id) {
+ case IB_SA_ATTR_SERVICE_REC:
+ size = 176;
+ break;
+ case IB_SA_ATTR_PATH_REC:
+ size = 64;
+ break;
+ case IB_SA_ATTR_MC_MEMBER_REC:
+ size = 52;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+ return size;
+}
+
+struct ib_sa_attr_cursor *ib_sa_create_cursor(struct ib_mad_recv_wc *mad_recv_wc)
{
- struct ib_sa_path_query *query =
- container_of(sa_query, struct ib_sa_path_query, sa_query);
+ struct ib_sa_attr_cursor *cursor;
+ struct ib_sa_mad *mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ int attr_id, attr_size, attr_offset;
- if (mad) {
- struct ib_sa_path_rec rec;
+ attr_id = be16_to_cpu(mad->mad_hdr.attr_id);
+ attr_offset = be16_to_cpu(mad->sa_hdr.attr_offset) * 8;
+ attr_size = sa_mad_attr_size(attr_id);
+ if (!attr_size || attr_offset < attr_size)
+ return ERR_PTR(-EINVAL);
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
+ cursor = kzalloc(sizeof *cursor + attr_size, GFP_KERNEL);
+ if (!cursor)
+ return ERR_PTR(-ENOMEM);
+
+ cursor->data_left = mad_recv_wc->mad_len - IB_MGMT_SA_HDR;
+ cursor->recv_wc = mad_recv_wc;
+ cursor->recv_buf = &mad_recv_wc->recv_buf;
+ cursor->attr_id = attr_id;
+ cursor->attr_offset = attr_offset;
+ cursor->attr_size = attr_size;
+ return cursor;
}
+EXPORT_SYMBOL(ib_sa_create_cursor);
-static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
+void ib_sa_free_cursor(struct ib_sa_attr_cursor *cursor)
{
- kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
+ kfree(cursor);
+}
+EXPORT_SYMBOL(ib_sa_free_cursor);
+
+int ib_sa_get_next_attr(struct ib_sa_attr_cursor *cursor, void *attr)
+{
+ struct ib_sa_mad *mad;
+ void *sa_attr = NULL;
+ int left, offset = 0;
+
+ while (cursor->data_left >= cursor->attr_offset) {
+ while (cursor->data_offset < IB_MGMT_SA_DATA) {
+ mad = (struct ib_sa_mad *) cursor->recv_buf->mad;
+
+ left = IB_MGMT_SA_DATA - cursor->data_offset;
+ if (left < cursor->attr_size) {
+ /* copy first piece of the attribute */
+ sa_attr = &cursor->attr;
+ memcpy(sa_attr, &mad->data[cursor->data_offset],
+ left);
+ offset = left;
+ break;
+ } else if (offset) {
+ /* copy the second piece of the attribute */
+ memcpy(sa_attr + offset, &mad->data[0],
+ cursor->attr_size - offset);
+ cursor->data_offset = cursor->attr_size - offset;
+ offset = 0;
+ } else {
+ sa_attr = &mad->data[cursor->data_offset];
+ cursor->data_offset += cursor->attr_size;
+ }
+
+ cursor->data_left -= cursor->attr_offset;
+ return !ib_sa_unpack_attr(attr, sa_attr,
+ cursor->attr_id);
+ }
+ cursor->data_offset = 0;
+ cursor->recv_buf = list_entry(cursor->recv_buf->list.next,
+ struct ib_mad_recv_buf, list);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ib_sa_get_next_attr);
+
+int ib_sa_send_mad(struct ib_device *device, u8 port_num,
+ int method, void *attr, int attr_id,
+ ib_sa_comp_mask comp_mask,
+ int timeout_ms, int retries, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context),
+ void *context, struct ib_sa_query **query)
+{
+ struct ib_sa_query *sa_query;
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_port *port;
+ struct ib_mad_agent *agent;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ sa_query = kmalloc(sizeof *sa_query, gfp_mask);
+ if (!sa_query)
+ return -ENOMEM;
+
+ sa_query->mad_buf = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
+ IB_MGMT_SA_DATA, gfp_mask);
+ if (!sa_query->mad_buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ sa_query->port = port;
+ sa_query->callback = callback;
+ sa_query->context = context;
+
+ init_mad(sa_query->mad_buf->mad, agent, method, attr, attr_id,
+ comp_mask);
+
+ ret = send_mad(sa_query, timeout_ms, retries, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ *query = sa_query;
+ return ret;
+
+err2:
+ ib_free_send_mad(sa_query->mad_buf);
+err1:
+ kfree(sa_query);
+ return ret;
+}
+
+static void ib_sa_path_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
+{
+ struct ib_sa_path_query *query = context;
+
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_path_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
/**
@@ -647,83 +828,47 @@ int ib_sa_path_rec_get(struct ib_device
struct ib_sa_query **sa_query)
{
struct ib_sa_path_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
- query->sa_query.release = ib_sa_path_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = IB_MGMT_METHOD_GET;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, IB_MGMT_METHOD_GET, rec,
+ IB_SA_ATTR_PATH_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_path_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
+ kfree(query);
return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
-
-err1:
- kfree(query);
- return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
-static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+static void ib_sa_service_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
{
- struct ib_sa_service_query *query =
- container_of(sa_query, struct ib_sa_service_query, sa_query);
-
- if (mad) {
- struct ib_sa_service_rec rec;
+ struct ib_sa_service_query *query = context;
- ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_service_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
/**
@@ -764,89 +909,47 @@ int ib_sa_service_rec_query(struct ib_de
struct ib_sa_query **sa_query)
{
struct ib_sa_service_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
- if (method != IB_MGMT_METHOD_GET &&
- method != IB_MGMT_METHOD_SET &&
- method != IB_SA_METHOD_DELETE)
- return -EINVAL;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
- query->sa_query.release = ib_sa_service_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
- rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, method, rec,
+ IB_SA_ATTR_SERVICE_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_service_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
+ kfree(query);
return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
-
-err1:
- kfree(query);
- return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
-static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status,
- struct ib_sa_mad *mad)
+static void ib_sa_mcmember_rec_callback(int status,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ void *context)
{
- struct ib_sa_mcmember_query *query =
- container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
+ struct ib_sa_mcmember_query *query = context;
- if (mad) {
- struct ib_sa_mcmember_rec rec;
-
- ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
- mad->data, &rec);
- query->callback(status, &rec, query->context);
- } else
- query->callback(status, NULL, query->context);
-}
-
-static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
-{
- kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
+ if (query->callback) {
+ if (mad_recv_wc) {
+ struct ib_sa_mad *mad;
+ struct ib_sa_mcmember_rec rec;
+
+ mad = (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad;
+ ib_unpack(mcmember_rec_table,
+ ARRAY_SIZE(mcmember_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+ }
+ if (mad_recv_wc)
+ ib_free_recv_mad(mad_recv_wc);
+ kfree(query);
}
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
@@ -861,61 +964,22 @@ int ib_sa_mcmember_rec_query(struct ib_d
struct ib_sa_query **sa_query)
{
struct ib_sa_mcmember_query *query;
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
- struct ib_sa_port *port;
- struct ib_mad_agent *agent;
- struct ib_sa_mad *mad;
int ret;
- if (!sa_dev)
- return -ENODEV;
-
- port = &sa_dev->port[port_num - sa_dev->start_port];
- agent = port->agent;
-
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
- goto err1;
- }
-
query->callback = callback;
query->context = context;
-
- mad = query->sa_query.mad_buf->mad;
- init_mad(mad, agent);
-
- query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
- query->sa_query.release = ib_sa_mcmember_rec_release;
- query->sa_query.port = port;
- mad->mad_hdr.method = method;
- mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
- mad->sa_hdr.comp_mask = comp_mask;
-
- ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
- rec, mad->data);
-
- *sa_query = &query->sa_query;
-
- ret = send_mad(&query->sa_query, timeout_ms, retries, gfp_mask);
+ ret = ib_sa_send_mad(device, port_num, method, rec,
+ IB_SA_ATTR_MC_MEMBER_REC, comp_mask, timeout_ms,
+ retries, gfp_mask, ib_sa_mcmember_rec_callback,
+ query, &query->sa_query);
if (ret < 0)
- goto err2;
+ kfree(query);
return ret;
-
-err2:
- *sa_query = NULL;
- ib_free_send_mad(query->sa_query.mad_buf);
-
-err1:
- kfree(query);
- return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
@@ -931,13 +995,13 @@ static void send_handler(struct ib_mad_a
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ query->callback(-ETIMEDOUT, NULL, query->context);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ query->callback(-EINTR, NULL, query->context);
break;
default:
- query->callback(query, -EIO, NULL);
+ query->callback(-EIO, NULL, query->context);
break;
}
@@ -947,7 +1011,7 @@ static void send_handler(struct ib_mad_a
ib_free_send_mad(mad_send_wc->send_buf);
kref_put(&query->sm_ah->ref, free_sm_ah);
- query->release(query);
+ kfree(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
@@ -959,17 +1023,11 @@ static void recv_handler(struct ib_mad_a
mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
query = mad_buf->context[0];
- if (query->callback) {
- if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
- else
- query->callback(query, -EIO, NULL);
- }
-
- ib_free_recv_mad(mad_recv_wc);
+ if (query->callback)
+ query->callback(mad_recv_wc->recv_buf.mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc, query->context);
+ else
+ ib_free_recv_mad(mad_recv_wc);
}
static void ib_sa_add_one(struct ib_device *device)
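(Usage note, not part of the patch: an outstanding request can be aborted
using the query ID returned by ib_sa_send_mad() together with the query
context, roughly as below; my_query_paths is the hypothetical helper from
the sketch above.)

	int id;
	struct ib_sa_query *query;

	id = my_query_paths(device, port_num, &rec, comp_mask, context, &query);
	if (id < 0)
		return id;
	...
	/* Per the documentation above, the callback then runs with
	 * status -EINTR. */
	ib_sa_cancel_query(id, query);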