[ofw] mlx4 patch - supporting multiple devices
Tzachi Dar
tzachid at mellanox.co.il
Wed Jun 11 14:00:08 PDT 2008
The following patch adds support for features needed for ipoib such as
collapsed CQs and RSS (first step is to allocate ranges of qps).
Thanks
Tzachi
Index: mlx4/kernel/bus/ib/cq.c
===================================================================
--- mlx4/kernel/bus/ib/cq.c (revision 1261)
+++ mlx4/kernel/bus/ib/cq.c (working copy)
@@ -200,7 +200,7 @@
}
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- cq->db.dma.da, &cq->mcq);
+ cq->db.dma.da, &cq->mcq, 0, 0);
if (err)
goto err_dbmap;
Index: mlx4/kernel/bus/ib/main.c
===================================================================
--- mlx4/kernel/bus/ib/main.c (revision 1261)
+++ mlx4/kernel/bus/ib/main.c (working copy)
@@ -91,7 +91,7 @@
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
- props->max_qp = dev->dev->caps.num_qps -
dev->dev->caps.reserved_qps;
+//?????????????????????? props->max_qp = dev->dev->caps.num_qps -
dev->dev->caps.reserved_qps;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
@@ -252,34 +252,6 @@
return 0;
}
-
-static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int
reset_qkey_viols,
- u32 cap_mask)
-{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memset(mailbox->buf, 0, 256);
-
- if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
- *(u8 *) mailbox->buf = (u8)(!!reset_qkey_viols << 6);
- ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
- } else {
- ((u8 *) mailbox->buf)[3] = (u8)!!reset_qkey_viols;
- ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
- }
-
- err = mlx4_cmd(dev->dev, mailbox->dma.da, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev->dev, mailbox);
- return err;
-}
-
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int
mask,
struct ib_port_modify *props)
{
@@ -296,7 +268,7 @@
cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
- err = mlx4_SET_PORT(to_mdev(ibdev), port,
+ err = mlx4_SET_PORT(to_mdev(ibdev)->dev, port,
!!(mask & IB_PORT_RESET_QKEY_CNTR),
cap_mask);
Index: mlx4/kernel/bus/ib/qp.c
===================================================================
--- mlx4/kernel/bus/ib/qp.c (revision 1261)
+++ mlx4/kernel/bus/ib/qp.c (working copy)
@@ -324,7 +324,7 @@
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
+ struct ib_udata *udata, u32 sqpn, struct mlx4_ib_qp *qp)
{
int err;
@@ -419,6 +419,11 @@
}
}
+ if (!sqpn)
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &sqpn);
+ if (err)
+ goto err_wrid;
+
err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
if (err)
goto err_wrid;
Index: mlx4/kernel/bus/ib/srq.c
===================================================================
--- mlx4/kernel/bus/ib/srq.c (revision 1261)
+++ mlx4/kernel/bus/ib/srq.c (working copy)
@@ -82,6 +82,8 @@
int buf_size;
int err;
int i;
+ u32 cqn = 0;
+ u16 xrcd = 0;
/* Sanity check SRQ size before proceeding */
if ((int)init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
@@ -170,8 +172,7 @@
goto err_mtt;
}
}
-
- err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
+ err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcd, &srq->mtt,
srq->db.dma.da, &srq->msrq);
if (err)
goto err_wrid;
@@ -264,6 +265,9 @@
struct mlx4_ib_dev *dev = to_mdev(srq->device);
struct mlx4_ib_srq *msrq = to_msrq(srq);
+ mlx4_srq_invalidate(dev->dev, &msrq->msrq);
+ mlx4_srq_remove(dev->dev, &msrq->msrq);
+
mlx4_srq_free(dev->dev, &msrq->msrq);
mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
Index: mlx4/kernel/bus/inc/cmd.h
===================================================================
--- mlx4/kernel/bus/inc/cmd.h (revision 1261)
+++ mlx4/kernel/bus/inc/cmd.h (working copy)
@@ -130,6 +130,15 @@
MLX4_MAILBOX_SIZE = 4096
};
+enum {
+ /* set port opcode modifiers */
+ MLX4_SET_PORT_GENERAL = 0x0,
+ MLX4_SET_PORT_RQP_CALC = 0x1,
+ MLX4_SET_PORT_MAC_TABLE = 0x2,
+ MLX4_SET_PORT_VLAN_TABLE = 0x3,
+ MLX4_SET_PORT_PRIO_MAP = 0x4,
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -174,4 +183,6 @@
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct
mlx4_cmd_mailbox *mailbox);
+int imlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int
out_is_imm,
+ u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout);
#endif /* MLX4_CMD_H */
Index: mlx4/kernel/bus/inc/device.h
===================================================================
--- mlx4/kernel/bus/inc/device.h (revision 1261)
+++ mlx4/kernel/bus/inc/device.h (working copy)
@@ -55,6 +55,7 @@
MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7,
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
+ MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
MLX4_DEV_CAP_FLAG_APM = 1 << 17,
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
@@ -128,11 +129,26 @@
MLX4_STAT_RATE_OFFSET = 5
};
+enum qp_region {
+ MLX4_QP_REGION_FW = 0,
+ MLX4_QP_REGION_ETH_ADDR,
+ MLX4_QP_REGION_FC_ADDR,
+ MLX4_QP_REGION_FC_EXCH,
+ MLX4_QP_REGION_COUNT /* Must be last */
+};
+
+enum {
+ MLX4_NUM_FEXCH = 64 * 1024,
+};
+
+
struct mlx4_caps {
u64 fw_ver;
int num_ports;
int vl_cap[MLX4_MAX_PORTS + 1];
- int mtu_cap[MLX4_MAX_PORTS + 1];
+ int ib_mtu_cap[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ int eth_mtu_cap[MLX4_MAX_PORTS + 1];
int gid_table_len[MLX4_MAX_PORTS + 1];
int pkey_table_len[MLX4_MAX_PORTS + 1];
int local_ca_ack_delay;
@@ -147,7 +163,6 @@
int max_rq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int reserved_qps;
int sqp_start;
int num_srqs;
int max_srq_wqes;
@@ -176,13 +191,46 @@
u32 flags;
u16 stat_rate_support;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
+ int reserved_qps_cnt[MLX4_QP_REGION_COUNT];
+ int reserved_qps_base[MLX4_QP_REGION_COUNT];
+ int log_num_macs;
+ int log_num_vlans;
+ int log_num_prios;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ int reserved_fexch_mpts_base;
};
struct mlx4_buf_list {
u8 *buf;
dma_addr_t map;
};
+enum {
+ MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+struct mlx4_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+struct mlx4_db {
+ __be32 *db;
+ struct mlx4_db_pgdir *pgdir;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
+struct mlx4_mtt {
+ u32 first_seg;
+ int order;
+ int page_shift;
+};
+
struct mlx4_buf {
union {
struct mlx4_buf_list direct;
@@ -193,10 +241,10 @@
int page_shift;
};
-struct mlx4_mtt {
- u32 first_seg;
- int order;
- int page_shift;
+struct mlx4_hwq_resources {
+ struct mlx4_db db;
+ struct mlx4_mtt mtt;
+ struct mlx4_buf buf;
};
struct mlx4_mr {
@@ -238,6 +286,7 @@
int arm_sn;
int cqn;
+ int comp_eq_idx;
atomic_t refcount;
struct completion free;
@@ -304,10 +353,45 @@
u64 si_guid;
};
+static inline void mlx4_query_steer_cap(struct mlx4_dev *dev, int
*log_mac,
+ int *log_vlan, int *log_prio)
+{
+ *log_mac = dev->caps.log_num_macs;
+ *log_vlan = dev->caps.log_num_vlans;
+ *log_prio = dev->caps.log_num_prios;
+}
+
+static inline u32 mlx4_get_ports_of_type(struct mlx4_dev *dev,
+ enum mlx4_port_type ptype)
+{
+ u32 ret = 0;
+ int i;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ if (dev->caps.port_type[i] == ptype)
+ ret |= 1 << (i-1);
+ }
+ return ret;
+}
+
+#define foreach_port(port, bitmap) \
+ for ((port) = 1; (port) <= MLX4_MAX_PORTS; ++(port)) \
+ if (bitmap & 1 << ((port)-1))
+
+static inline int mlx4_get_fexch_mpts_base(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_fexch_mpts_base;
+}
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf
*buf);
+int mlx4_db_alloc(struct mlx4_dev *dev,
+ struct mlx4_db *db, int order);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
@@ -319,25 +403,68 @@
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr);
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32
access,
int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
+void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
+struct device;
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources
*wqres,
+ int size, int max_direct);
+void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources
*wqres,
+ int size);
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
- struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
+ struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+ unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp);
+struct mlx4_cq_context;
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+ struct mlx4_cq_context *context, int modify);
+
+static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
+ void __iomem *uar_page,
+ spinlock_t *doorbell_lock);
+
+enum mlx4_qp_state;
+enum mlx4_qp_optpar;
+struct mlx4_qp_context;
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32
*base);
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int
cnt);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
- u64 db_rec, struct mlx4_srq *srq);
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp);
+
+
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
+
+void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
+
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
+
+void mlx4_srq_invalidate(struct mlx4_dev *dev, struct mlx4_srq *srq);
+void mlx4_srq_remove(struct mlx4_dev *dev, struct mlx4_srq *srq);
+
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int
limit_watermark);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int
*limit_watermark);
@@ -347,14 +474,29 @@
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8
gid[16]);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8
gid[16]);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int
*index);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int
*index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64
*page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey);
+int mlx4_map_phys_fmr_fbo(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+ u64 *page_list, int npages, u64 iova,
+ u32 fbo, u32 len, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int
max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
+int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u32 access, int max_pages, int max_maps,
+ u8 page_shift, struct mlx4_fmr *fmr);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int reset_qkey_viols,
+ u32 cap_mask);
+
#endif /* MLX4_DEVICE_H */
Index: mlx4/kernel/bus/inc/qp.h
===================================================================
--- mlx4/kernel/bus/inc/qp.h (revision 1261)
+++ mlx4/kernel/bus/inc/qp.h (working copy)
@@ -52,7 +52,10 @@
MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
- MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16
+ MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
+ MLX4_QP_OPTPAR_RSS_RCA = 1 << 17,
+ MLX4_QP_OPTPAR_SRQN = 1 << 18,
+ MLX4_QP_OPTPAR_CQN_RCV = 1 << 19
};
enum mlx4_qp_state {
@@ -275,20 +278,21 @@
__be32 byte_count;
};
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp);
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context);
+
+
+int mlx4_qp_get_region(struct mlx4_dev *dev,
+ enum qp_region region,
+ int *base_qpn, int *cnt);
+
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev,
u32 qpn)
{
- return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps
- 1));
+ return (struct mlx4_qp *)radix_tree_lookup(&dev->qp_table_tree, qpn &
(dev->caps.num_qps - 1));
}
-void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
struct mlx4_qp *mlx4_qp_lookup_locked(struct mlx4_dev *dev, u32 qpn);
Index: mlx4/kernel/bus/net/alloc.c
===================================================================
--- mlx4/kernel/bus/net/alloc.c (revision 1261)
+++ mlx4/kernel/bus/net/alloc.c (working copy)
@@ -38,15 +38,19 @@
spin_lock(&bitmap->lock);
- obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
- if (obj >= bitmap->max) {
+ obj = find_next_zero_bit(bitmap->table,
+ bitmap->effective_max,
+ bitmap->last);
+ if (obj >= bitmap->effective_max) {
bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
- obj = find_first_zero_bit(bitmap->table, bitmap->max);
+ obj = find_first_zero_bit(bitmap->table, bitmap->effective_max);
}
- if (obj < bitmap->max) {
+ if (obj < bitmap->effective_max) {
set_bit(obj, bitmap->table);
- bitmap->last = (obj + 1) & (bitmap->max - 1);
+ bitmap->last = (obj + 1);
+ if (bitmap->last == bitmap->effective_max)
+ bitmap->last = 0;
obj |= bitmap->top;
} else
obj = (u32)-1;
@@ -67,10 +71,87 @@
spin_unlock(&bitmap->lock);
}
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32
reserved)
+static unsigned long find_next_zero_string_aligned(unsigned long
*bitmap,
+ u32 start, u32 nbits,
+ int len, int align)
{
+ unsigned long end, i;
+
+again:
+ start = ALIGN(start, align);
+ while ((start < nbits) && test_bit(start, bitmap))
+ start += align;
+ if (start >= nbits)
+ return ULONG_MAX;
+
+ end = start+len;
+ if (end > nbits)
+ return ULONG_MAX;
+ for (i = start+1; i < end; i++) {
+ if (test_bit(i, bitmap)) {
+ start = i+1;
+ goto again;
+ }
+ }
+ return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int
align)
+{
+ u32 obj;
int i;
+ if (likely(cnt == 1 && align == 1))
+ return mlx4_bitmap_alloc(bitmap);
+
+ spin_lock(&bitmap->lock);
+
+ obj = find_next_zero_string_aligned(bitmap->table, bitmap->last,
+ bitmap->effective_max, cnt, align);
+ if (obj >= bitmap->effective_max) {
+ bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ obj = find_next_zero_string_aligned(bitmap->table, 0,
+ bitmap->effective_max,
+ cnt, align);
+ }
+
+ if (obj < bitmap->effective_max) {
+ for (i = 0; i < cnt; i++)
+ set_bit(obj+i, bitmap->table);
+ if (obj == bitmap->last) {
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->effective_max)
+ bitmap->last = 0;
+ }
+ obj |= bitmap->top;
+ } else
+ obj = ULONG_MAX;
+
+ spin_unlock(&bitmap->lock);
+
+
+ return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int
cnt)
+{
+ int i;
+
+ obj &= bitmap->max - 1;
+
+ spin_lock(&bitmap->lock);
+ for (i = 0; i < cnt; i++)
+ clear_bit(obj+i, bitmap->table);
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ spin_unlock(&bitmap->lock);
+}
+int mlx4_bitmap_init_with_effective_max(struct mlx4_bitmap *bitmap,
+ u32 num, u32 mask, u32 reserved,
+ u32 effective_max)
+{
+ int i;
+
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
@@ -79,6 +160,7 @@
bitmap->top = 0;
bitmap->max = num;
bitmap->mask = mask;
+ bitmap->effective_max = effective_max;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long),
GFP_KERNEL);
if (!bitmap->table)
@@ -90,6 +172,13 @@
return 0;
}
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap,
+ u32 num, u32 mask, u32 reserved)
+{
+ return mlx4_bitmap_init_with_effective_max(bitmap, num, mask,
+ reserved, num);
+}
+
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
kfree(bitmap->table);
@@ -182,3 +271,166 @@
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct mlx4_dev *dev)
+{
+ struct mlx4_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+
+
+ pgdir->db_page = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+ struct mlx4_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+ if (i < MLX4_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->pgdir = pgdir;
+ db->index = i;
+ db->db = pgdir->db_page + db->index;
+ db->dma.da = pgdir->db_dma.da + db->index * 4;
+ db->dma.va = (VOID *)(UINT_PTR)-1;
+ db->dma.sz = ULONG_MAX;
+ db->order = order;
+
+ return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev,
+ struct mlx4_db *db, int order)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_db_pgdir *pgdir;
+ int ret = 0;
+ int ret1 = 0;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &priv->pgdir_list, list, struct
mlx4_db_pgdir)
+ if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = mlx4_alloc_db_pgdir(dev);
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &priv->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ ret1 = mlx4_alloc_db_from_pgdir(pgdir, db, order);
+ ASSERT(ret1 == 0);
+
+out:
+ mutex_unlock(&priv->pgdir_mutex);
+
+ return ret;
+}
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int o;
+ int i;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->pgdir->order0)) {
+ clear_bit(i ^ 1, db->pgdir->order0);
+ ++o;
+ }
+
+ i >>= o;
+ set_bit(i, db->pgdir->bits[o]);
+
+ if (bitmap_full(db->pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ db->pgdir->db_page, db->pgdir->db_dma);
+ list_del(&db->pgdir->list);
+ kfree(db->pgdir);
+ }
+
+ mutex_unlock(&priv->pgdir_mutex);
+}
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources
*wqres,
+ int size, int max_direct)
+{
+ int err;
+
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
+ if (err)
+ return err;
+ *wqres->db.db = 0;
+
+ if (mlx4_buf_alloc(dev, size, max_direct, &wqres->buf)) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+ &wqres->mtt);
+ if (err)
+ goto err_buf;
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+ mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+ mlx4_db_free(dev, &wqres->db);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources
*wqres,
+ int size)
+{
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+ mlx4_buf_free(dev, size, &wqres->buf);
+ mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
+
Index: mlx4/kernel/bus/net/cmd.c
===================================================================
--- mlx4/kernel/bus/net/cmd.c (revision 1261)
+++ mlx4/kernel/bus/net/cmd.c (working copy)
@@ -515,3 +515,12 @@
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+// This is the interface version of this function
+int imlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int
out_is_imm,
+ u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout)
+{
+ return __mlx4_cmd(dev, in_param, out_param, out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+}
+
Index: mlx4/kernel/bus/net/cq.c
===================================================================
--- mlx4/kernel/bus/net/cq.c (revision 1261)
+++ mlx4/kernel/bus/net/cq.c (working copy)
@@ -117,7 +117,8 @@
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
- struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
+ struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+ unsigned vector, int collapsed)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -126,6 +127,10 @@
u64 mtt_addr;
int err;
+ UNREFERENCED_PARAMETER(vector);
+#define COLLAPSED_SHIFT 18
+#define ENTRIES_SHIFT 24
+
cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
if (cq->cqn == -1)
return -ENOMEM;
@@ -153,7 +158,9 @@
cq_context = (struct mlx4_cq_context *)mailbox->buf;
memset(cq_context, 0, sizeof *cq_context);
- cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) |
uar->index);
+ cq_context->flags = cpu_to_be32(!!collapsed << COLLAPSED_SHIFT);
+ cq_context->logsize_usrpage = cpu_to_be32(
+ (ilog2(nent) << ENTRIES_SHIFT) | uar->index);
cq_context->comp_eqn = (u8)priv->eq_table.eq[MLX4_EQ_COMP].eqn;
cq_context->log_page_size = (u8)(mtt->page_shift -
MLX4_ICM_PAGE_SHIFT);
Index: mlx4/kernel/bus/net/fw.c
===================================================================
--- mlx4/kernel/bus/net/fw.c (revision 1261)
+++ mlx4/kernel/bus/net/fw.c (working copy)
@@ -308,7 +308,7 @@
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->max_vl[i] = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
- dev_cap->max_mtu[i] = field >> 4;
+ dev_cap->ib_mtu[i] = field >> 4;
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -316,9 +316,11 @@
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
}
} else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
#define QUERY_PORT_MTU_OFFSET 0x01
#define QUERY_PORT_WIDTH_OFFSET 0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -328,7 +330,7 @@
goto out;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
- dev_cap->max_mtu[i] = field & 0xf;
+ dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -336,6 +338,14 @@
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
dev_cap->max_vl[i] = field & 0xf;
+ MLX4_GET(field, outbox,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ dev_cap->supported_port_types[i] = field & 3;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+ dev_cap->log_max_macs[i] = field & 0xf;
+ dev_cap->log_max_vlans[i] = field >> 4;
+ dev_cap->eth_mtu[i] = be16_to_cpu(((u16 *) outbox)[1]);
+ dev_cap->def_mac[i] = be64_to_cpu(((u64 *) outbox)[2]);
}
}
@@ -373,7 +383,7 @@
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap:
%d\n",
- dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
dev_cap->max_port_width[1]);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -706,7 +716,7 @@
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
/* Enable QoS support if module parameter set */
- if (g.enable_qos)
+ if (g.mod_enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
/* QPC/EEC/CQC/EQC/RDMARC attributes */
@@ -787,7 +797,7 @@
flags |= (dev->caps.port_width_cap[port] & 0xf) <<
INIT_PORT_PORT_WIDTH_SHIFT;
MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
- field = (u16)(128 << dev->caps.mtu_cap[port]);
+ field = (u16)(128 << dev->caps.ib_mtu_cap[port]);
MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
field = (u16)dev->caps.gid_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
Index: mlx4/kernel/bus/net/fw.h
===================================================================
--- mlx4/kernel/bus/net/fw.h (revision 1261)
+++ mlx4/kernel/bus/net/fw.h (working copy)
@@ -61,11 +61,13 @@
int local_ca_ack_delay;
int num_ports;
u32 max_msg_sz;
- int max_mtu[MLX4_MAX_PORTS + 1];
+ int ib_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
int max_gids[MLX4_MAX_PORTS + 1];
int max_pkeys[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ int eth_mtu[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
u32 flags;
int reserved_uars;
@@ -96,6 +98,10 @@
u8 bmme_flags;
u32 reserved_lkey;
u64 max_icm_sz;
+ u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 log_max_macs[MLX4_MAX_PORTS + 1];
+ u8 log_max_vlans[MLX4_MAX_PORTS + 1];
+
};
struct mlx4_adapter {
Index: mlx4/kernel/bus/net/main.c
===================================================================
--- mlx4/kernel/bus/net/main.c (revision 1261)
+++ mlx4/kernel/bus/net/main.c (working copy)
@@ -58,13 +58,14 @@
#endif /* CONFIG_PCI_MSI */
+
static struct mlx4_profile default_profile = {
1 << 17, /* num_qp */
1 << 4, /* rdmarc_per_qp */
1 << 16, /* num_srq */
1 << 16, /* num_cq */
1 << 13, /* num_mcg */
- 1 << 17, /* num_mpt */
+ 1 << 18, /* num_mpt */
1 << 20 /* num_mtt */
};
@@ -103,11 +104,68 @@
};
#define MLX4_PCI_TABLE_SIZE (sizeof(mlx4_pci_table)/sizeof(struct
pci_device_id))
+
+static int mlx4_check_port_params(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_type)
+{
+ if (port_type[0] != port_type[1] &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ mlx4_err(dev, "Only same port types supported "
+ "on this HCA, aborting.\n");
+ return -EINVAL;
+ }
+ if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
+ (port_type[1] == MLX4_PORT_TYPE_IB)) {
+ mlx4_err(dev, "eth-ib configuration is not supported.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void mlx4_str2port_type(WCHAR **port_str,
+ enum mlx4_port_type *port_type)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_PORTS; i++) {
+ if (!wcscmp(port_str[i], L"eth"))
+ port_type[i] = MLX4_PORT_TYPE_ETH;
+ else
+ port_type[i] = MLX4_PORT_TYPE_IB;
+ }
+}
+
+int mlx4_count_ib_ports()
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < MLX4_MAX_PORTS; i++) {
+ if (g.mod_port_type[i] == MLX4_PORT_TYPE_IB) {
+ count++;
+ }
+ }
+ return count;
+}
+
+BOOLEAN mlx4_is_eth_port(int port_number)
+{
+ if (g.mod_port_type[port_number] == MLX4_PORT_TYPE_IB) {
+ return FALSE;
+ }
+ return TRUE;
+}
+
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap
*dev_cap)
{
int err;
int i;
+ int num_eth_ports = 0;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS];
+ for (i = 0; i < MLX4_MAX_PORTS; i++)
+ port_type[i] = g.mod_port_type[i];
+
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
@@ -138,10 +196,12 @@
dev->caps.num_ports = dev_cap->num_ports;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
- dev->caps.mtu_cap[i] = dev_cap->max_mtu[i];
+ dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = (u8)dev_cap->max_port_width[i];
+ dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
+ dev->caps.def_mac[i] = dev_cap->def_mac[i];
}
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -152,7 +212,6 @@
dev->caps.max_rq_sg = dev_cap->max_rq_sg;
dev->caps.max_wqes = dev_cap->max_qp_sz;
dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
- dev->caps.reserved_qps = dev_cap->reserved_qps;
dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -178,6 +237,55 @@
dev->caps.flags = dev_cap->flags;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.log_num_macs = ilog2(roundup_pow_of_two
+ (g.mod_num_mac + 1));
+ dev->caps.log_num_vlans = ilog2(roundup_pow_of_two
+ (g.mod_num_vlan + 2));
+ dev->caps.log_num_prios = (g.mod_use_prio)? 3: 0;
+
+ err = mlx4_check_port_params(dev, port_type);
+ if (err)
+ return err;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ if (!dev_cap->supported_port_types[i]) {
+ mlx4_warn(dev, "FW doesn't support Multi Protocol, "
+ "loading IB only\n");
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+ continue;
+ }
+ if (port_type[i-1] & dev_cap->supported_port_types[i])
+ dev->caps.port_type[i] = port_type[i-1];
+ else {
+ mlx4_err(dev, "Requested port type for port %d "
+ "not supported by HW\n", i);
+ return -ENODEV;
+ }
+ if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+ dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+ mlx4_warn(dev, "Requested number of MACs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_macs);
+ }
+ if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+ dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+ mlx4_warn(dev, "Requested number of VLANs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_vlans);
+ }
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ ++num_eth_ports;
+ }
+
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+ (1 << dev->caps.log_num_macs)*
+ (1 << dev->caps.log_num_vlans)*
+ (1 << dev->caps.log_num_prios)*
+ num_eth_ports;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
return 0;
}
@@ -226,7 +334,8 @@
((u64) (MLX4_CMPT_TYPE_QP *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err)
goto err;
@@ -356,7 +465,8 @@
init_hca->qpc_base,
dev_cap->qpc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
goto err_unmap_dmpt;
@@ -366,7 +476,8 @@
init_hca->auxc_base,
dev_cap->aux_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
goto err_unmap_qp;
@@ -376,7 +487,8 @@
init_hca->altc_base,
dev_cap->altc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
goto err_unmap_auxc;
@@ -386,7 +498,8 @@
init_hca->rdmarc_base,
dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
goto err_unmap_altc;
@@ -579,6 +692,7 @@
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ u8 port;
err = mlx4_init_uar_table(dev);
if (err) {
@@ -677,8 +791,23 @@
"multicast group table, aborting.\n");
goto err_qp_table_free;
}
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ err = mlx4_SET_PORT(dev, port, 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_mcg_table_free;
+ }
+ }
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ mlx4_init_mac_table(dev, port);
+ mlx4_init_vlan_table(dev, port);
+ }
+
return 0;
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -811,6 +940,9 @@
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ mutex_init(&priv->pgdir_mutex);
+
/* deal with livefish, if any */
dev = &priv->dev;
dev->pdev = pdev;
Index: mlx4/kernel/bus/net/mcg.c
===================================================================
--- mlx4/kernel/bus/net/mcg.c (revision 1261)
+++ mlx4/kernel/bus/net/mcg.c (working copy)
@@ -211,7 +211,7 @@
}
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
- (!!g.mlx4_blck_lb << MGM_BLCK_LB_BIT));
+ (!!g.mod_mlx4_blck_lb << MGM_BLCK_LB_BIT));
mgm->members_count = cpu_to_be32(members_count);
Index: mlx4/kernel/bus/net/mlx4.h
===================================================================
--- mlx4/kernel/bus/net/mlx4.h (revision 1261)
+++ mlx4/kernel/bus/net/mlx4.h (working copy)
@@ -60,6 +60,12 @@
// Driver global data
//
+enum mlx4_port_type {
+ MLX4_PORT_TYPE_IB = 1 << 0,
+ MLX4_PORT_TYPE_ETH = 1 << 1,
+};
+
+
#pragma warning(disable:4201) // nameless struct/union
typedef struct _GLOBALS {
BUS_WMI_STD_DATA;
@@ -71,9 +77,14 @@
int mod_num_mcg;
int mod_num_mpt;
int mod_num_mtt;
+ int mod_num_mac;
+ int mod_num_vlan;
+ int mod_use_prio;
- int enable_qos;
- int mlx4_blck_lb;
+ int mod_enable_qos;
+ int mod_mlx4_blck_lb;
+ enum mlx4_port_type mod_port_type[MLX4_MAX_PORTS];
+
} GLOBALS;
#pragma warning(default:4201) // nameless struct/union
@@ -122,6 +133,7 @@
u32 last;
u32 top;
u32 max;
+ u32 effective_max;
u32 mask;
spinlock_t lock;
unsigned long *table;
@@ -271,6 +283,35 @@
LARGE_INTEGER interval;
};
+struct mlx4_mac_table {
+#define MLX4_MAX_MAC_NUM 128
+#define MLX4_MAC_MASK 0xffffffffffff
+#define MLX4_MAC_VALID_SHIFT 63
+#define MLX4_MAC_TABLE_SIZE MLX4_MAX_MAC_NUM << 3
+ __be64 entries[MLX4_MAX_MAC_NUM];
+ int refs[MLX4_MAX_MAC_NUM];
+ struct semaphore mac_sem;
+ int total;
+ int max;
+};
+
+struct mlx4_vlan_table {
+#define MLX4_MAX_VLAN_NUM 126
+#define MLX4_VLAN_MASK 0xfff
+#define MLX4_VLAN_VALID 1 << 31
+#define MLX4_VLAN_TABLE_SIZE MLX4_MAX_VLAN_NUM << 2
+ __be32 entries[MLX4_MAX_VLAN_NUM];
+ int refs[MLX4_MAX_VLAN_NUM];
+ struct semaphore vlan_sem;
+ int total;
+ int max;
+};
+
+struct mlx4_port_info {
+ struct mlx4_mac_table mac_table;
+ struct mlx4_vlan_table vlan_table;
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -278,6 +319,9 @@
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
struct mlx4_fw fw;
struct mlx4_cmd cmd;
@@ -296,6 +340,7 @@
struct mlx4_uar driver_uar;
void __iomem *kar;
+ struct mlx4_port_info port[MLX4_MAX_PORTS];
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -305,9 +350,17 @@
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+int mlx4_bitmap_init_with_effective_max(struct mlx4_bitmap *bitmap,
+ u32 num, u32 mask, u32 reserved,
+ u32 effective_max);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
+int mlx4_db_alloc(struct mlx4_dev *dev,
+ struct mlx4_db *db, int order);
+
int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
@@ -336,6 +389,9 @@
void mlx4_intf_init();
void mlx4_net_init();
+BOOLEAN mlx4_is_eth_port(int port_number);
+int mlx4_count_ib_ports();
+
struct mlx4_dev_cap;
struct mlx4_init_hca_param;
@@ -353,9 +409,16 @@
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_qp_get_region(struct mlx4_dev *dev,
+ enum qp_region region,
+ int *base_qpn, int *cnt);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
+void mlx4_init_mac_table(struct mlx4_dev *dev, u8 port);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, u8 port);
+
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
@@ -366,5 +429,7 @@
void mlx4_remove_one(struct pci_dev *pdev);
+#define ETH_FCS_LEN 4 /* Frame Check Sequence Length */
+#define ETH_HLEN 14
#endif /* MLX4_H */
Index: mlx4/kernel/bus/net/mr.c
===================================================================
--- mlx4/kernel/bus/net/mr.c (revision 1261)
+++ mlx4/kernel/bus/net/mr.c (working copy)
@@ -434,8 +434,18 @@
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
int err;
- err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
- (u32)~0, (u32)dev->caps.reserved_mrws);
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ dev->caps.reserved_fexch_mpts_base = dev->caps.num_mpts -
+ (2 * dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]);
+ err = mlx4_bitmap_init_with_effective_max(&mr_table->mpt_bitmap,
+ dev->caps.num_mpts,
+ (u32)~0, dev->caps.reserved_mrws,
+ dev->caps.reserved_fexch_mpts_base);
+
+
+
if (err)
return err;
Index: mlx4/kernel/bus/net/port.c
===================================================================
--- mlx4/kernel/bus/net/port.c (revision 0)
+++ mlx4/kernel/bus/net/port.c (revision 0)
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "mlx4.h"
+#include "cmd.h"
+
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+ int i;
+
+ sema_init(&table->mac_sem, 1);
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_macs;
+ table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i;
+
+ sema_init(&table->vlan_sem, 1);
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_vlans;
+ table->total = 0;
+}
+
+static int mlx4_SET_PORT_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma.da, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+{
+ struct mlx4_mac_table *table =
+ &mlx4_priv(dev)->port[port - 1].mac_table;
+ int i, err = 0;
+ int free = -1;
+ u64 valid = 1;
+
+ mlx4_dbg(dev, "Registering mac : 0x%llx\n", mac);
+ down(&table->mac_sem);
+ for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+ if (free < 0 && !table->refs[i]) {
+ free = i;
+ continue;
+ }
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ /* Mac already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+ mlx4_dbg(dev, "Free mac index is %d\n", free);
+
+ if (table->total == table->max) {
+ /* No free mac entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new MAC */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be64(mac | valid << MLX4_MAC_VALID_SHIFT);
+
+ err = mlx4_SET_PORT_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding mac: 0x%llx\n", mac);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ up(&table->mac_sem);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_mac_table *table =
+ &mlx4_priv(dev)->port[port - 1].mac_table;
+
+ down(&table->mac_sem);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No mac entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_warn(dev, "Have more references for index %d,"
+ "no need to modify mac table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_SET_PORT_mac_table(dev, port, table->entries);
+ --table->total;
+out:
+ up(&table->mac_sem);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+static int mlx4_SET_PORT_vlan_table(struct mlx4_dev *dev, u8 port,
+ __be32 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+ in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma.da, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ struct mlx4_vlan_table *table =
+ &mlx4_priv(dev)->port[port - 1].vlan_table;
+ int i, err = 0;
+ int free = -1;
+
+ down(&table->vlan_sem);
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+ if (free < 0 && (table->refs[i] == 0)) {
+ free = i;
+ continue;
+ }
+
+ if (table->refs[i] &&
+ (vlan == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* Vlan already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new VLAN */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+ err = mlx4_SET_PORT_vlan_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ up(&table->vlan_sem);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_vlan_table *table =
+ &mlx4_priv(dev)->port[port - 1].vlan_table;
+
+ down(&table->vlan_sem);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_dbg(dev, "Have more references for index %d,"
+ "no need to modify vlan table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_SET_PORT_vlan_table(dev, port, table->entries);
+ --table->total;
+out:
+ up(&table->vlan_sem);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port,
+ int reset_qkey_viols, u32 cap_mask)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ u8 is_eth = (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) ? 1 : 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) mailbox->buf = (u8)(!!reset_qkey_viols << 6);
+ ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+ } else {
+ ((u8 *) mailbox->buf)[3] = (u8)!!reset_qkey_viols;
+ ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
+ }
+
+ if (is_eth) {
+ ((u8 *) mailbox->buf)[3] = 7;
+ ((__be16 *) mailbox->buf)[3] =
+ cpu_to_be16(dev->caps.eth_mtu_cap[port] +
+ ETH_HLEN + ETH_FCS_LEN);
+ ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+ ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+ }
+ err = mlx4_cmd(dev, mailbox->dma.da, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
Index: mlx4/kernel/bus/net/qp.c
===================================================================
--- mlx4/kernel/bus/net/qp.c (revision 1261)
+++ mlx4/kernel/bus/net/qp.c (working copy)
@@ -142,20 +142,43 @@
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32
*base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int qpn;
+
+ qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (qpn == -1)
+ return -ENOMEM;
+
+ *base = qpn;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ if (base_qpn < dev->caps.sqp_start + 8)
+ return;
+
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (sqpn)
- qp->qpn = sqpn;
- else {
- qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
- if (qp->qpn == -1)
- return -ENOMEM;
- }
+ if (!qpn)
+ return -EINVAL;
+ qp->qpn = qpn;
+
err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
if (err)
goto err_out;
@@ -203,9 +226,6 @@
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
err_out:
- if (!sqpn)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
-
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -247,8 +267,6 @@
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
- if (qp->qpn >= dev->caps.sqp_start + 8)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
@@ -262,6 +280,7 @@
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
+ int reserved_from_top = 0;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -271,9 +290,44 @@
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*/
- dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
- err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 24) - 1, dev->caps.sqp_start + 8);
+ dev->caps.sqp_start =
+ ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+ {
+ int sort[MLX4_QP_REGION_COUNT];
+ int i, j, tmp;
+ int last_base = dev->caps.num_qps;
+
+ for (i = 1; i < MLX4_QP_REGION_COUNT; ++i)
+ sort[i] = i;
+
+ for (i = MLX4_QP_REGION_COUNT; i > 0; --i) {
+ for (j = 2; j < i; ++j) {
+ if (dev->caps.reserved_qps_cnt[sort[j]] >
+ dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+ tmp = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
+ }
+ }
+ }
+
+ for (i = 1; i < MLX4_QP_REGION_COUNT; ++i) {
+ last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+ dev->caps.reserved_qps_base[sort[i]] = last_base;
+ reserved_from_top +=
+ dev->caps.reserved_qps_cnt[sort[i]];
+ }
+
+ }
+
+ err = mlx4_bitmap_init_with_effective_max(&qp_table->bitmap,
+ dev->caps.num_qps,
+ (1 << 23) - 1,
+ dev->caps.sqp_start + 8,
+ dev->caps.num_qps -
+ reserved_from_top);
+
if (err)
return err;
@@ -287,6 +341,21 @@
radix_tree_destroy(&dev->qp_table_tree);
}
+int mlx4_qp_get_region(struct mlx4_dev *dev,
+ enum qp_region region,
+ int *base_qpn, int *cnt)
+{
+ if ((region < 0) || (region >= MLX4_QP_REGION_COUNT))
+ return -EINVAL;
+
+ *base_qpn = dev->caps.reserved_qps_base[region];
+ *cnt = dev->caps.reserved_qps_cnt[region];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_get_region);
+
+
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context)
{
@@ -307,3 +376,35 @@
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
+int mlx4_qp_to_ready(struct mlx4_dev *dev,
+ struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp,
+ enum mlx4_qp_state *qp_state)
+{
+#define STATE_ARR_SIZE 4
+ int err = 0;
+ int i;
+ enum mlx4_qp_state states[STATE_ARR_SIZE] = {
+ MLX4_QP_STATE_RST,
+ MLX4_QP_STATE_INIT,
+ MLX4_QP_STATE_RTR,
+ MLX4_QP_STATE_RTS
+ };
+
+ for (i = 0; i < STATE_ARR_SIZE - 1; i++) {
+ context->flags |= cpu_to_be32(states[i+1] << 28);
+ err = mlx4_qp_modify(dev, mtt, states[i],
+ states[i+1], context, 0, 0, qp);
+ if (err) {
+ mlx4_err(dev, "Failed to bring qp to state:"
+ "%d with error: %d\n",
+ states[i+1], err);
+ return err;
+ }
+ *qp_state = states[i+1];
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
+
Index: mlx4/kernel/bus/net/srq.c
===================================================================
--- mlx4/kernel/bus/net/srq.c (revision 1261)
+++ mlx4/kernel/bus/net/srq.c (working copy)
@@ -38,20 +38,20 @@
struct mlx4_srq_context {
__be32 state_logsize_srqn;
u8 logstride;
- u8 reserved1[3];
- u8 pg_offset;
- u8 reserved2[3];
- u32 reserved3;
+ u8 reserved1;
+ __be16 xrc_domain;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
u8 log_page_size;
- u8 reserved4[2];
+ u8 reserved3[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
__be32 pd;
__be16 limit_watermark;
__be16 wqe_cnt;
- u16 reserved5;
+ u16 reserved4;
__be16 wqe_counter;
- u32 reserved6;
+ u32 reserved5;
__be64 db_rec_addr;
};
@@ -107,14 +107,15 @@
MLX4_CMD_TIME_CLASS_A);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
- u64 db_rec, struct mlx4_srq *srq)
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_srq_context *srq_context;
u64 mtt_addr;
int err;
+ UNREFERENCED_PARAMETER(xrcd);
srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
if (srq->srqn == -1)
@@ -146,6 +147,7 @@
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | srq->srqn);
srq_context->logstride = (u8)(srq->wqe_shift - 4);
+ srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
srq_context->log_page_size = (u8)(mtt->page_shift -
MLX4_ICM_PAGE_SHIFT);
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -182,19 +184,32 @@
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
-void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
+
+
+void mlx4_srq_invalidate(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
- struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
int err;
err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
if (err)
mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err,
srq->srqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_invalidate);
+void mlx4_srq_remove(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_remove);
+void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
--
Tzachi Dar
Mellanox Technologies LTD.
SW- Windows
Phone: +972 (4) 909 7200 (ext 271)
Mobile: +972 (57) 741 1269
E-mail: tzachid at mellanox.co.il
----------------------------------------------------------------------
Emails belong on computers, trees belong in forests; if you must print
this, do it on recycled paper.
http://www.greenpeace.org/international/
----------------------------------------------------------------------
Disclaimer added by CodeTwo Exchange Rules
http://www.codetwo.com
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.openfabrics.org/pipermail/ofw/attachments/20080612/0bc72df6/attachment.html>
-------------- next part --------------
A non-text attachment was scrubbed...
Name: mlx4_mp.patch
Type: application/octet-stream
Size: 56358 bytes
Desc: mlx4_mp.patch
URL: <http://lists.openfabrics.org/pipermail/ofw/attachments/20080612/0bc72df6/attachment.obj>
More information about the ofw
mailing list