[openib-general] [PATCH] [SA Query] Change sa_query MAD allocation
Sean Hefty
sean.hefty at intel.com
Thu Oct 13 16:25:57 PDT 2005
This patch changes sa_query to allocate MADs using the ib_create_send_mad()
routine.
The intent behind this change was to eventually change ib_post_send_mad() to
take a struct ib_mad_send_buf as input, but see the "DMA mapping abuses in MAD
layer" thread; we may want to go with an alternative solution there. However,
I'm posting the patch since it's usable even without changes to
ib_post_send_mad().
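For reference, here's roughly what the send path looks like after this change.
This is a minimal sketch based on the calls in the patch below; "agent", "ah",
"timeout_ms", "my_context", and "gfp_mask" stand in for the caller's state, and
error handling is abbreviated:

	struct ib_mad_send_buf *mad_buf;
	struct ib_send_wr *bad_wr;
	struct ib_sa_mad *mad;
	int ret;

	/* One call allocates the MAD, handles its DMA mapping, and builds
	 * the UD work request against QP1 (remote_qpn == 1). */
	mad_buf = ib_create_send_mad(agent, 1, 0, ah, 0,
				     IB_MGMT_MAD_DATA - IB_MGMT_SA_DATA,
				     IB_MGMT_SA_DATA, gfp_mask);
	if (!mad_buf)
		return -ENOMEM;

	mad = (struct ib_sa_mad *) mad_buf->mad;  /* fill in MAD/SA headers */
	mad_buf->context[0] = my_context;  /* recovered via wr_id in handlers */
	mad_buf->send_wr.wr.ud.timeout_ms = timeout_ms;

	ret = ib_post_send_mad(agent, &mad_buf->send_wr, &bad_wr);
	if (ret)
		ib_free_send_mad(mad_buf);  /* on error the buffer is still ours */

On completion, the send handler gets the same buffer back through
mad_send_wc->wr_id and frees it with ib_free_send_mad(), so the explicit
dma_map_single()/dma_unmap_single() and pci_unmap_addr bookkeeping goes away.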
Signed-off-by: Sean Hefty <sean.hefty at intel.com>
Index: sa_query.c
===================================================================
--- sa_query.c (revision 3692)
+++ sa_query.c (working copy)
@@ -74,9 +74,8 @@ struct ib_sa_query {
void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
void (*release)(struct ib_sa_query *);
struct ib_sa_port *port;
- struct ib_sa_mad *mad;
+ struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
- DECLARE_PCI_UNMAP_ADDR(mapping)
int id;
};
@@ -426,6 +425,7 @@ void ib_sa_cancel_query(int id, struct i
{
unsigned long flags;
struct ib_mad_agent *agent;
+ u64 wr_id;
spin_lock_irqsave(&idr_lock, flags);
if (idr_find(&query_idr, id) != query) {
@@ -433,9 +433,10 @@ void ib_sa_cancel_query(int id, struct i
return;
}
agent = query->port->agent;
+ wr_id = (unsigned long) query->mad_buf;
spin_unlock_irqrestore(&idr_lock, flags);
- ib_cancel_mad(agent, id);
+ ib_cancel_mad(agent, wr_id);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
@@ -455,73 +456,51 @@ static void init_mad(struct ib_sa_mad *m
spin_unlock_irqrestore(&tid_lock, flags);
}
+static void acquire_ah(struct ib_sa_port *port, struct ib_sa_query *query)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->ah_lock, flags);
+ kref_get(&port->sm_ah->ref);
+ query->sm_ah = port->sm_ah;
+ spin_unlock_irqrestore(&port->ah_lock, flags);
+}
+
static int send_mad(struct ib_sa_query *query, int timeout_ms)
{
struct ib_sa_port *port = query->port;
+ struct ib_send_wr *bad_wr;
unsigned long flags;
- int ret;
- struct ib_sge gather_list;
- struct ib_send_wr *bad_wr, wr = {
- .opcode = IB_WR_SEND,
- .sg_list = &gather_list,
- .num_sge = 1,
- .send_flags = IB_SEND_SIGNALED,
- .wr = {
- .ud = {
- .mad_hdr = &query->mad->mad_hdr,
- .remote_qpn = 1,
- .remote_qkey = IB_QP1_QKEY,
- .timeout_ms = timeout_ms,
- }
- }
- };
+ int ret, id;
retry:
if (!idr_pre_get(&query_idr, GFP_ATOMIC))
return -ENOMEM;
spin_lock_irqsave(&idr_lock, flags);
- ret = idr_get_new(&query_idr, query, &query->id);
+ ret = idr_get_new(&query_idr, query, &id);
spin_unlock_irqrestore(&idr_lock, flags);
if (ret == -EAGAIN)
goto retry;
if (ret)
return ret;
- wr.wr_id = query->id;
-
- spin_lock_irqsave(&port->ah_lock, flags);
- kref_get(&port->sm_ah->ref);
- query->sm_ah = port->sm_ah;
- wr.wr.ud.ah = port->sm_ah->ah;
- spin_unlock_irqrestore(&port->ah_lock, flags);
-
- gather_list.addr = dma_map_single(port->agent->device->dma_device,
- query->mad,
- sizeof (struct ib_sa_mad),
- DMA_TO_DEVICE);
- gather_list.length = sizeof (struct ib_sa_mad);
- gather_list.lkey = port->agent->mr->lkey;
- pci_unmap_addr_set(query, mapping, gather_list.addr);
+ query->mad_buf->send_wr.wr.ud.timeout_ms = timeout_ms;
+ query->mad_buf->context[0] = query;
+ query->id = id;
- ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
+ ret = ib_post_send_mad(port->agent, &query->mad_buf->send_wr, &bad_wr);
if (ret) {
- dma_unmap_single(port->agent->device->dma_device,
- pci_unmap_addr(query, mapping),
- sizeof (struct ib_sa_mad),
- DMA_TO_DEVICE);
- kref_put(&query->sm_ah->ref, free_sm_ah);
spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, query->id);
+ idr_remove(&query_idr, id);
spin_unlock_irqrestore(&idr_lock, flags);
}
/*
* It's not safe to dereference query any more, because the
* send may already have completed and freed the query in
- * another context. So use wr.wr_id, which has a copy of the
- * query's id.
+ * another context.
*/
- return ret ? ret : wr.wr_id;
+ return ret ? ret : id;
}
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@@ -543,7 +522,6 @@ static void ib_sa_path_rec_callback(stru
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
- kfree(sa_query->mad);
kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
@@ -585,42 +563,53 @@ int ib_sa_path_rec_get(struct ib_device
struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port = &sa_dev->port[port_num - sa_dev->start_port];
struct ib_mad_agent *agent = port->agent;
+ struct ib_sa_mad *mad;
int ret;
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
- if (!query->sa_query.mad) {
- kfree(query);
- return -ENOMEM;
+
+ acquire_ah(port, &query->sa_query);
+ query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+ query->sa_query.sm_ah->ah,
+ 0, IB_MGMT_MAD_DATA -
+ IB_MGMT_SA_DATA,
+ IB_MGMT_SA_DATA, gfp_mask);
+ if (!query->sa_query.mad_buf) {
+ ret = -ENOMEM;
+ goto err1;
}
query->callback = callback;
query->context = context;
- init_mad(query->sa_query.mad, agent);
+ mad = (struct ib_sa_mad *) query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
- query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
- query->sa_query.release = ib_sa_path_rec_release;
- query->sa_query.port = port;
- query->sa_query.mad->mad_hdr.method = IB_MGMT_METHOD_GET;
- query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
- query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+ query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
+ query->sa_query.release = ib_sa_path_rec_release;
+ query->sa_query.port = port;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
- ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
- rec, query->sa_query.mad->data);
+ ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
*sa_query = &query->sa_query;
ret = send_mad(&query->sa_query, timeout_ms);
- if (ret < 0) {
- *sa_query = NULL;
- kfree(query->sa_query.mad);
- kfree(query);
- }
+ if (ret < 0)
+ goto err2;
return ret;
+err2:
+ *sa_query = NULL;
+ ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+ kref_put(&query->sa_query.sm_ah->ref, free_sm_ah);
+ kfree(query);
+ return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
@@ -643,7 +632,6 @@ static void ib_sa_service_rec_callback(s
static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
- kfree(sa_query->mad);
kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
@@ -687,6 +675,7 @@ int ib_sa_service_rec_query(struct ib_de
struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port = &sa_dev->port[port_num - sa_dev->start_port];
struct ib_mad_agent *agent = port->agent;
+ struct ib_sa_mad *mad;
int ret;
if (method != IB_MGMT_METHOD_GET &&
@@ -697,38 +686,48 @@ int ib_sa_service_rec_query(struct ib_de
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
- if (!query->sa_query.mad) {
- kfree(query);
- return -ENOMEM;
+
+ acquire_ah(port, &query->sa_query);
+ query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+ query->sa_query.sm_ah->ah,
+ 0, IB_MGMT_MAD_DATA -
+ IB_MGMT_SA_DATA,
+ IB_MGMT_SA_DATA, gfp_mask);
+ if (!query->sa_query.mad_buf) {
+ ret = -ENOMEM;
+ goto err1;
}
query->callback = callback;
query->context = context;
- init_mad(query->sa_query.mad, agent);
+ mad = (struct ib_sa_mad *) query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
- query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
- query->sa_query.release = ib_sa_service_rec_release;
- query->sa_query.port = port;
- query->sa_query.mad->mad_hdr.method = method;
- query->sa_query.mad->mad_hdr.attr_id =
- cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
- query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+ query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
+ query->sa_query.release = ib_sa_service_rec_release;
+ query->sa_query.port = port;
+ mad->mad_hdr.method = method;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
- rec, query->sa_query.mad->data);
+ rec, mad->data);
*sa_query = &query->sa_query;
ret = send_mad(&query->sa_query, timeout_ms);
- if (ret < 0) {
- *sa_query = NULL;
- kfree(query->sa_query.mad);
- kfree(query);
- }
+ if (ret < 0)
+ goto err2;
return ret;
+err2:
+ *sa_query = NULL;
+ ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+ kref_put(&query->sa_query.sm_ah->ref, free_sm_ah);
+ kfree(query);
+ return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
@@ -751,7 +750,6 @@ static void ib_sa_mcmember_rec_callback(
static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
- kfree(sa_query->mad);
kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
@@ -770,42 +768,54 @@ int ib_sa_mcmember_rec_query(struct ib_d
struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port = &sa_dev->port[port_num - sa_dev->start_port];
struct ib_mad_agent *agent = port->agent;
+ struct ib_sa_mad *mad;
int ret;
query = kmalloc(sizeof *query, gfp_mask);
if (!query)
return -ENOMEM;
- query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
- if (!query->sa_query.mad) {
- kfree(query);
- return -ENOMEM;
+
+ acquire_ah(port, &query->sa_query);
+ query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+ query->sa_query.sm_ah->ah,
+ 0, IB_MGMT_MAD_DATA -
+ IB_MGMT_SA_DATA,
+ IB_MGMT_SA_DATA, gfp_mask);
+ if (!query->sa_query.mad_buf) {
+ ret = -ENOMEM;
+ goto err1;
}
query->callback = callback;
query->context = context;
- init_mad(query->sa_query.mad, agent);
+ mad = (struct ib_sa_mad *) query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
- query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
- query->sa_query.release = ib_sa_mcmember_rec_release;
- query->sa_query.port = port;
- query->sa_query.mad->mad_hdr.method = method;
- query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
- query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+ query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
+ query->sa_query.release = ib_sa_mcmember_rec_release;
+ query->sa_query.port = port;
+ mad->mad_hdr.method = method;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
- rec, query->sa_query.mad->data);
+ rec, mad->data);
*sa_query = &query->sa_query;
ret = send_mad(&query->sa_query, timeout_ms);
- if (ret < 0) {
- *sa_query = NULL;
- kfree(query->sa_query.mad);
- kfree(query);
- }
+ if (ret < 0)
+ goto err2;
return ret;
+err2:
+ *sa_query = NULL;
+ ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+ kref_put(&query->sa_query.sm_ah->ref, free_sm_ah);
+ kfree(query);
+ return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
@@ -813,14 +823,11 @@ static void send_handler(struct ib_mad_a
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_sa_query *query;
+ struct ib_mad_send_buf *mad_buf;
unsigned long flags;
- spin_lock_irqsave(&idr_lock, flags);
- query = idr_find(&query_idr, mad_send_wc->wr_id);
- spin_unlock_irqrestore(&idr_lock, flags);
-
- if (!query)
- return;
+ mad_buf = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
+ query = mad_buf->context[0];
if (query->callback)
switch (mad_send_wc->status) {
@@ -838,30 +845,25 @@ static void send_handler(struct ib_mad_a
break;
}
- dma_unmap_single(agent->device->dma_device,
- pci_unmap_addr(query, mapping),
- sizeof (struct ib_sa_mad),
- DMA_TO_DEVICE);
- kref_put(&query->sm_ah->ref, free_sm_ah);
-
- query->release(query);
-
spin_lock_irqsave(&idr_lock, flags);
- idr_remove(&query_idr, mad_send_wc->wr_id);
+ idr_remove(&query_idr, query->id);
spin_unlock_irqrestore(&idr_lock, flags);
+
+ ib_free_send_mad(query->mad_buf);
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_sa_query *query;
- unsigned long flags;
+ struct ib_mad_send_buf *mad_buf;
- spin_lock_irqsave(&idr_lock, flags);
- query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
- spin_unlock_irqrestore(&idr_lock, flags);
+ mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
+ query = mad_buf->context[0];
- if (query && query->callback) {
+ if (query->callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?