[openib-general] [PATCH] mthca: query_qp and query_srq
Eli Cohen
eli at mellanox.co.il
Sun Feb 12 07:41:43 PST 2006
Signed-off-by: Eli Cohen <eli at mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst at mellanox.co.il>
Implement ib_query_qp() and ib_query_srq() for mthca.
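
For reference, a minimal consumer-side sketch of how the two new verbs would be
called once this patch is applied. The function name and the printed attributes
are illustrative only and are not part of the patch; the qp and srq are assumed
to have been created earlier by the caller.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Illustrative sketch only -- not part of the patch.  Queries a QP and
 * an SRQ that the caller created earlier and prints a few attributes. */
static void example_query(struct ib_qp *qp, struct ib_srq *srq)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_srq_attr srq_attr;

	if (!ib_query_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_CAP, &qp_init_attr))
		printk(KERN_INFO "QP state %d, max_send_wr %u\n",
		       qp_attr.qp_state, qp_attr.cap.max_send_wr);

	if (!ib_query_srq(srq, &srq_attr))
		printk(KERN_INFO "SRQ limit %u, max_wr %u\n",
		       srq_attr.srq_limit, srq_attr.max_wr);
}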
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_cmd.h
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -303,6 +303,8 @@ int mthca_RESIZE_CQ(struct mthca_dev *de
u8 *status);
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+ struct mthca_mailbox *mailbox, u8 *status);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status);
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
Index: openib_gen2/drivers/infiniband/core/verbs.c
===================================================================
--- openib_gen2.orig/drivers/infiniband/core/verbs.c
+++ openib_gen2/drivers/infiniband/core/verbs.c
@@ -257,9 +257,18 @@ int ib_query_qp(struct ib_qp *qp,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
- return qp->device->query_qp ?
+ int err;
+
+ err = qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
+ if (err)
+ return err;
+ qp_init_attr->recv_cq = qp->recv_cq;
+ qp_init_attr->send_cq = qp->send_cq;
+ qp_init_attr->srq = qp->srq;
+
+ return err;
}
EXPORT_SYMBOL(ib_query_qp);
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_srq.c
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_srq.c
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -357,6 +357,40 @@ int mthca_modify_srq(struct ib_srq *ibsr
return 0;
}
+
+int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ struct mthca_mailbox *mailbox;
+ struct mthca_arbel_srq_context *arbel_ctx;
+ u8 status;
+ int err;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+ if (err)
+ goto exit;
+	if (mthca_is_memfree(dev)) {
+		arbel_ctx = mailbox->buf;
+		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
+	} else
+		srq_attr->srq_limit = 0;
+
+ srq_attr->pd = ibsrq->pd;
+ srq_attr->max_wr = srq->max;
+ srq_attr->max_sge = srq->max_gs;
+
+exit:
+ mthca_free_mailbox(dev, mailbox);
+
+ return err;
+}
+
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type)
{
Index: openib_gen2/drivers/infiniband/include/rdma/ib_verbs.h
===================================================================
--- openib_gen2.orig/drivers/infiniband/include/rdma/ib_verbs.h
+++ openib_gen2/drivers/infiniband/include/rdma/ib_verbs.h
@@ -423,9 +423,10 @@ enum ib_srq_attr_mask {
};
struct ib_srq_attr {
- u32 max_wr;
- u32 max_sge;
- u32 srq_limit;
+ u32 max_wr;
+ u32 max_sge;
+ u32 srq_limit;
+ struct ib_pd *pd;
};
struct ib_srq_init_attr {
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_provider.c
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_provider.c
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1270,7 +1270,10 @@ int mthca_register_device(struct mthca_d
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ);
+
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1291,7 +1294,8 @@ int mthca_register_device(struct mthca_d
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
dev->ib_dev.create_srq = mthca_create_srq;
- dev->ib_dev.modify_srq = mthca_modify_srq;
+ dev->ib_dev.modify_srq = mthca_modify_srq;
+ dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
if (mthca_is_memfree(dev))
@@ -1302,6 +1306,7 @@ int mthca_register_device(struct mthca_d
dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp;
+ dev->ib_dev.query_qp = mthca_query_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp;
dev->ib_dev.create_cq = mthca_create_cq;
dev->ib_dev.resize_cq = mthca_resize_cq;
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_qp.c
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_qp.c
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -270,6 +270,32 @@ static int to_mthca_state(enum ib_qp_sta
}
}
+
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+ switch (mthca_state) {
+ case MTHCA_QP_STATE_RST: return IB_QPS_RESET;
+ case MTHCA_QP_STATE_INIT: return IB_QPS_INIT;
+ case MTHCA_QP_STATE_RTR: return IB_QPS_RTR;
+ case MTHCA_QP_STATE_RTS: return IB_QPS_RTS;
+ case MTHCA_QP_STATE_DRAINING:
+ case MTHCA_QP_STATE_SQD: return IB_QPS_SQD;
+ case MTHCA_QP_STATE_SQE: return IB_QPS_SQE;
+ case MTHCA_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+ switch (mthca_mig_state) {
+ case 0: return IB_MIG_ARMED;
+ case 1: return IB_MIG_REARM;
+ case 3: return IB_MIG_MIGRATED;
+ default: BUG();
+ }
+}
+
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
@@ -553,6 +579,44 @@ static __be32 get_hw_access_flags(struct
return cpu_to_be32(hw_access_flags);
}
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+ int ib_flags = 0;
+
+ if (mthca_flags & MTHCA_QP_BIT_RRE)
+ ib_flags |= IB_ACCESS_REMOTE_READ;
+
+ if (mthca_flags & MTHCA_QP_BIT_RWE)
+ ib_flags |= IB_ACCESS_REMOTE_WRITE;
+
+ if (mthca_flags & MTHCA_QP_BIT_RAE)
+ ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+			  struct mthca_qp_path *path)
+{
+	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+ ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+ ib_ah_attr->dlid = be16_to_cpu(path->rlid);
+ ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
+ ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+ ib_ah_attr->static_rate = path->static_rate & 0x7;
+ ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+ if (ib_ah_attr->ah_flags) {
+		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
+ ib_ah_attr->grh.hop_limit = path->hop_limit;
+ ib_ah_attr->grh.traffic_class =
+ (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
+ ib_ah_attr->grh.flow_label =
+ be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
+ memcpy(ib_ah_attr->grh.dgid.raw,
+ path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ }
+}
+
static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
{
path->g_mylmc = ah->src_path_bits & 0x7f;
@@ -914,6 +978,79 @@ int mthca_modify_qp(struct ib_qp *ibqp,
return err;
}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		   struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mthca_dev *dev = to_mdev(ibqp->device);
+ struct mthca_qp *qp = to_mqp(ibqp);
+ int err;
+ struct mthca_mailbox *mailbox;
+ struct mthca_qp_param *qp_param;
+ struct mthca_qp_context *context;
+ int mthca_state;
+ u8 status;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+ if (err)
+ goto exit;
+
+ qp_param = mailbox->buf;
+ context = &qp_param->context;
+ mthca_state = be32_to_cpu(context->flags) >> 28;
+ qp_attr->qp_state = to_ib_qp_state(mthca_state);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->path_mtu = context->mtu_msgmax >> 5;
+ qp_attr->path_mig_state =
+ to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+ qp_attr->qkey = be32_to_cpu(context->qkey);
+ qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+ qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
+ qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
+ qp_attr->qp_access_flags =
+ to_ib_qp_access_flags(be32_to_cpu(context->params2));
+ qp_attr->cap.max_send_wr = qp->sq.max;
+ qp_attr->cap.max_recv_wr = qp->rq.max;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+
+ qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+
+	/* qp_attr->en_sqd_async_notify is only applicable in modify_qp */
+ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
+
+	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+ qp_attr->max_dest_rd_atomic =
+ 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+ qp_attr->min_rnr_timer =
+ (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+ qp_attr->port_num = qp_attr->ah_attr.port_num;
+ qp_attr->timeout = context->pri_path.ackto >> 3;
+ qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
+ qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
+ qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_timeout = context->alt_path.ackto >> 3;
+ qp_init_attr->cap = qp_attr->cap;
+
+exit:
+ mthca_free_mailbox(dev, mailbox);
+ return err;
+}
+
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
/*
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_cmd.c
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1554,6 +1554,15 @@ int mthca_SW2HW_SRQ(struct mthca_dev *de
CMD_TIME_CLASS_A, status);
}
+
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+ struct mthca_mailbox *mailbox, u8 *status)
+{
+ return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+ CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status)
{
Index: openib_gen2/drivers/infiniband/hw/mthca/mthca_dev.h
===================================================================
--- openib_gen2.orig/drivers/infiniband/hw/mthca/mthca_dev.h
+++ openib_gen2/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -486,6 +486,7 @@ int mthca_alloc_srq(struct mthca_dev *de
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask);
+int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -497,6 +498,10 @@ int mthca_arbel_post_srq_recv(struct ib_
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+		   struct ib_qp_init_attr *qp_init_attr);
+
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,