[openib-general] Re: mthca_qp patch
Michael S. Tsirkin
mst at mellanox.co.il
Wed Dec 7 07:43:48 PST 2005
Several fixes in mthca:
1. Add limit checking on the rd_atomic and dest_rd_atomic attributes:
in particular for max_dest_rd_atomic, a value larger than the HCA
capability can cause RDB overflow and corrupt another QP.
2. Fix typo in the rd_atomic calculation: ffs(x) - 1 does not compute
the exponent of the next power of 2, fls(x - 1) does (see the sketch
after this list).
3. Update the driver's copy of the QP attributes in modify QP only
after verifying that the modify QP command completed successfully.
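
For reference (not part of the patch), here is a minimal userspace
sketch of the difference behind fix 2; fls_demo() below is just a
stand-in for the kernel's fls(), and the loop prints the exponent and
resulting power of 2 that each expression produces:

/*
 * Illustrates why ffs(x) - 1 is wrong for rounding up to a power of 2:
 * ffs() returns the index of the *lowest* set bit, so for x = 5 it
 * yields exponent 0 (2^0 = 1 < 5), while fls(x - 1) yields the ceiling
 * of log2(x), i.e. exponent 3 (2^3 = 8 >= 5).
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* userspace stand-in for the kernel's fls(): position of highest set bit */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int x;

	for (x = 1; x <= 8; ++x)
		printf("x=%u: ffs(x)-1=%d -> %d, fls(x-1)=%d -> %d\n",
		       x, ffs(x) - 1, 1 << (ffs(x) - 1),
		       fls_demo(x - 1), 1 << fls_demo(x - 1));
	return 0;
}

For x = 5 this prints "x=5: ffs(x)-1=0 -> 1, fls(x-1)=3 -> 8": only the
fls() form encodes a depth large enough to cover the requested 5
outstanding RDMA/atomic operations.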
Signed-off-by: Jack Morgenstein <jackm at mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Index: linux-kernel/drivers/infiniband/hw/mthca/mthca_qp.c
===================================================================
--- linux-kernel.orig/drivers/infiniband/hw/mthca/mthca_qp.c
+++ linux-kernel/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -589,6 +589,20 @@ int mthca_modify_qp(struct ib_qp *ibqp,
return -EINVAL;
}
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+ mthca_dbg(dev, "Max rdma_atomic as initiator (%u) too large. max is %d\n",
+ attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+ mthca_dbg(dev, "Max rdma_atomic as responder(%u) too large. max is %d\n",
+ attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+ return -EINVAL;
+ }
+
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
@@ -712,9 +726,9 @@ int mthca_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
- ffs(attr->max_rd_atomic) - 1 : 0,
- 7) << 21);
+ qp_context->params1 |=
+ cpu_to_be32(attr->max_rd_atomic ?
+ fls(attr->max_rd_atomic - 1) << 21 : 0);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
}
@@ -748,13 +762,9 @@ int mthca_modify_qp(struct ib_qp *ibqp,
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
MTHCA_QP_OPTPAR_RRE |
MTHCA_QP_OPTPAR_RAE);
-
- qp->atomic_rd_en = attr->qp_access_flags;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- u8 rra_max;
-
if (qp->resp_depth && !attr->max_dest_rd_atomic) {
/*
* Lowering our responder resources to zero.
@@ -782,16 +792,10 @@ int mthca_modify_qp(struct ib_qp *ibqp,
MTHCA_QP_OPTPAR_RAE);
}
- for (rra_max = 0;
- 1 << rra_max < attr->max_dest_rd_atomic &&
- rra_max < dev->qp_table.rdb_shift;
- ++rra_max)
- ; /* nothing */
-
- qp_context->params2 |= cpu_to_be32(rra_max << 21);
+ qp_context->params2 |=
+ cpu_to_be32(attr->max_dest_rd_atomic ?
+ fls(attr->max_dest_rd_atomic - 1) << 21 : 0);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
-
- qp->resp_depth = attr->max_dest_rd_atomic;
}
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
@@ -833,8 +837,13 @@ int mthca_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
}
- if (!err)
+ if (!err) {
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
qp->state = new_state;
+ }
mthca_free_mailbox(dev, mailbox);