[ofa-general] [PATCH v2] cma: fix access to freed memory
Eli Cohen
eli at mellanox.co.il
Tue Aug 4 06:24:08 PDT 2009
rdma_join_multicast() allocates struct cma_multicast and then proceeds to join
a multicast address. However, the join operation completes in another
context, and the allocated struct could be freed if the user either destroys
the rdma_id object or leaves the multicast group while the join operation is
still in progress. This patch adds a kref to struct cma_multicast to maintain
a reference count and avoid this use-after-free.
Signed-off-by: Eli Cohen <eli at mellanox.co.il>
---
Changes from previous version: I removed the spinlock protection around the
mc list manipulation because:
a. it belongs in a separate patch, and
b. I have doubts about whether that protection is necessary at all.
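
For readers less familiar with the kref API used here, the pattern is the
standard one from <linux/kref.h>: the object starts with a count of one,
every context that may outlive the allocator takes an extra reference with
kref_get(), and each side drops its reference with kref_put(), which invokes
the release callback only when the count reaches zero. Below is a minimal
sketch of that lifecycle; the struct and function names are illustrative
only and are not part of the patch.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* payload ... */
};

/* Called by kref_put() once the last reference is dropped. */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->ref);	/* count = 1, owned by the caller */
	return f;
}

/* The creator hands the object to an asynchronous context ... */
static void foo_hand_off(struct foo *f)
{
	kref_get(&f->ref);	/* the async context now holds its own reference */
}

/* ... and each context does a put when done; the last put frees the object. */
static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);
}

The patch applies the same scheme to struct cma_multicast: kref_init() in
rdma_join_multicast(), kref_get() before the join request is handed off, and
kref_put(..., release_mc) in the multicast event handler as well as in the
leave/destroy paths.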
drivers/infiniband/core/cma.c | 20 ++++++++++++++++----
1 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 851de83..aa62101 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -157,6 +157,7 @@ struct cma_multicast {
struct list_head list;
void *context;
struct sockaddr_storage addr;
+ struct kref mcref;
};
struct cma_work {
@@ -290,6 +291,13 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
complete(&cma_dev->comp);
}
+static void release_mc(struct kref *kref)
+{
+ struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+ kfree(mc);
+}
+
static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
list_del(&id_priv->list);
@@ -827,7 +835,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
struct cma_multicast, list);
list_del(&mc->list);
ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ kref_put(&mc->mcref, release_mc);
}
}
@@ -2643,7 +2651,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
id_priv = mc->id_priv;
if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
- return 0;
+ goto out;
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
@@ -2669,10 +2677,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
cma_exch(id_priv, CMA_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id);
- return 0;
+ goto out;
}
mutex_unlock(&id_priv->handler_mutex);
+out:
+ kref_put(&mc->mcref, release_mc);
return 0;
}
@@ -2759,11 +2769,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
memcpy(&mc->addr, addr, ip_addr_size(addr));
mc->context = context;
mc->id_priv = id_priv;
+ kref_init(&mc->mcref);
spin_lock(&id_priv->lock);
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);
+ kref_get(&mc->mcref);
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
ret = cma_join_ib_multicast(id_priv, mc);
@@ -2800,7 +2812,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
&mc->multicast.ib->rec.mgid,
mc->multicast.ib->rec.mlid);
ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
+ kref_put(&mc->mcref, release_mc);
return;
}
}
--
1.6.3.3