[ofa-general] [PATCH 2/2] IB/ipath - fix QP use-after-free bug
Arthur Jones
arthur.jones at qlogic.com
Tue Jan 15 15:58:18 PST 2008
From: Ralph Campbell <ralph.campbell at qlogic.com>
When ipath_destroy_qp() is called while send WQEs are still queued, it is
possible for the ipath driver to schedule the send tasklet after
tasklet_kill() has already run, which leads to the QP structure being used
after it is freed.
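In outline, the problematic interleaving looks like this (a simplified
sketch, not verbatim driver code):

    CPU 0: ipath_destroy_qp()         CPU 1: e.g. ack or piobufavail path
    -------------------------         -----------------------------------
    tasklet_kill(&qp->s_task);
                                      tasklet_hi_schedule(&qp->s_task);
    ...QP structure freed...
                                      s_task runs, dereferencing the
                                      freed struct ipath_qp

Fix this by setting a new IPATH_S_DESTROYING bit in qp->s_busy before
tasklet_kill() and by converting all callers of tasklet_hi_schedule() to
a new helper, ipath_schedule_send(), which tests the bit first.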
Signed-off-by: Ralph Campbell <ralph.campbell at qlogic.com>
---
drivers/infiniband/hw/ipath/ipath_qp.c | 3 ++-
drivers/infiniband/hw/ipath/ipath_rc.c | 34 +++++++++++++++--------------
drivers/infiniband/hw/ipath/ipath_verbs.c | 6 +++--
drivers/infiniband/hw/ipath/ipath_verbs.h | 7 ++++++
4 files changed, 30 insertions(+), 20 deletions(-)
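For reference, the new scheduling pattern reduced to its two halves
(identifiers as in the patch below; surrounding code elided):

    /* Teardown side (ipath_destroy_qp): mark the QP, then kill. */
    set_bit(IPATH_S_DESTROYING, &qp->s_busy);
    tasklet_kill(&qp->s_task);

    /* Every former tasklet_hi_schedule() call site now uses: */
    static inline void ipath_schedule_send(struct ipath_qp *qp)
    {
            if (!test_bit(IPATH_S_DESTROYING, &qp->s_busy))
                    tasklet_hi_schedule(&qp->s_task);
    }

Once a scheduler observes the bit set, it will not queue s_task again,
so tasklet_kill() can wait out any run already in progress and the QP
can then be freed safely.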
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index b997ff8..fbeec63 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -942,6 +942,7 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
spin_lock(&dev->n_qps_lock);
dev->n_qps_allocated--;
spin_unlock(&dev->n_qps_lock);
+ set_bit(IPATH_S_DESTROYING, &qp->s_busy);
/* Stop the sending tasklet. */
tasklet_kill(&qp->s_task);
@@ -1077,5 +1078,5 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
(qp->s_lsn == (u32) -1 ||
ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
qp->s_lsn + 1) <= 0))
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 120a61b..9215dad 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -652,8 +652,8 @@ queue_ack:
qp->s_ack_psn = qp->r_ack_psn;
spin_unlock_irqrestore(&qp->s_lock, flags);
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
done:
return;
@@ -713,7 +713,7 @@ static void reset_psn(struct ipath_qp *qp, u32 psn)
/*
* Set the state to restart in the middle of a request.
* Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See ipath_do_rc_send().
+ * See ipath_make_rc_req().
*/
switch (opcode) {
case IB_WR_SEND:
@@ -790,7 +790,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
reset_psn(qp, psn);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
bail:
return;
@@ -798,11 +798,13 @@ bail:
static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
- if (qp->s_wait_credit) {
- qp->s_wait_credit = 0;
- tasklet_hi_schedule(&qp->s_task);
+ if (qp->s_last_psn != psn) {
+ qp->s_last_psn = psn;
+ if (qp->s_wait_credit) {
+ qp->s_wait_credit = 0;
+ ipath_schedule_send(qp);
+ }
}
- qp->s_last_psn = psn;
}
/**
@@ -904,10 +906,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
!qp->s_num_rd_atomic) {
qp->s_flags &= ~IPATH_S_FENCE_PENDING;
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
} else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
}
/* Post a send completion queue entry if requested. */
@@ -970,7 +972,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
*/
if (ipath_cmp24(qp->s_psn, psn) <= 0) {
reset_psn(qp, psn + 1);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
qp->s_state = OP(SEND_LAST);
@@ -1484,7 +1486,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
break;
}
qp->r_nak_state = 0;
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
unlock_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1847,8 +1849,8 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
barrier();
qp->r_head_ack_queue = next;
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
goto done;
}
@@ -1907,8 +1909,8 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
barrier();
qp->r_head_ack_queue = next;
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
goto done;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index c4c9984..dc779e0 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -616,7 +616,7 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
if (--qp->s_rnr_timeout == 0) {
do {
list_del_init(&qp->timerwait);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
if (list_empty(last))
break;
qp = list_entry(last->next, struct ipath_qp,
@@ -1060,7 +1060,7 @@ bail:
* This is called from ipath_intr() at interrupt level when a PIO buffer is
* available after ipath_verbs_send() returned an error that no buffers were
* available. Return 1 if we consumed all the PIO buffers and we still have
- * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
+ * QPs waiting for buffers (for now, just restart the send tasklet and
* return zero).
*/
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
@@ -1077,7 +1077,7 @@ int ipath_ib_piobufavail(struct ipath_ibdev *dev)
piowait);
list_del_init(&qp->piowait);
clear_bit(IPATH_S_BUSY, &qp->s_busy);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
spin_unlock_irqrestore(&dev->pending_lock, flags);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 6ccb54f..ebaf6f4 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -422,6 +422,7 @@ struct ipath_qp {
/* Bit definition for s_busy. */
#define IPATH_S_BUSY 0
+#define IPATH_S_DESTROYING 1
/*
* Bit definitions for s_flags.
@@ -635,6 +636,12 @@ static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
return container_of(ibdev, struct ipath_ibdev, ibdev);
}
+static inline void ipath_schedule_send(struct ipath_qp *qp)
+{
+ if (!test_bit(IPATH_S_DESTROYING, &qp->s_busy))
+ tasklet_hi_schedule(&qp->s_task);
+}
+
int ipath_process_mad(struct ib_device *ibdev,
int mad_flags,
u8 port_num,