|
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
|
|
|
[ 1] = "UC transport",
|
|
|
[ 2] = "UD transport",
|
|
|
[ 3] = "XRC transport",
|
|
|
- [ 4] = "reliable multicast",
|
|
|
- [ 5] = "FCoIB support",
|
|
|
[ 6] = "SRQ support",
|
|
|
[ 7] = "IPoIB checksum offload",
|
|
|
[ 8] = "P_Key violation counter",
|
|
|
[ 9] = "Q_Key violation counter",
|
|
|
- [10] = "VMM",
|
|
|
[12] = "Dual Port Different Protocol (DPDP) support",
|
|
|
[15] = "Big LSO headers",
|
|
|
[16] = "MW support",
|
|
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
|
|
|
[19] = "Raw multicast support",
|
|
|
[20] = "Address vector port checking support",
|
|
|
[21] = "UD multicast support",
|
|
|
- [24] = "Demand paging support",
|
|
|
- [25] = "Router support",
|
|
|
[30] = "IBoE support",
|
|
|
[32] = "Unicast loopback support",
|
|
|
[34] = "FCS header control",
|
|
|
- [38] = "Wake On LAN support",
|
|
|
+ [37] = "Wake On LAN (port1) support",
|
|
|
+ [38] = "Wake On LAN (port2) support",
|
|
|
[40] = "UDP RSS support",
|
|
|
[41] = "Unicast VEP steering support",
|
|
|
[42] = "Multicast VEP steering support",
|
|
@@ -145,7 +141,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
|
|
|
[16] = "CONFIG DEV support",
|
|
|
[17] = "Asymmetric EQs support",
|
|
|
[18] = "More than 80 VFs support",
|
|
|
- [19] = "Performance optimized for limited rule configuration flow steering support"
|
|
|
+ [19] = "Performance optimized for limited rule configuration flow steering support",
|
|
|
+ [20] = "Recoverable error events support"
|
|
|
};
|
|
|
int i;
|
|
|
|
|
@@ -259,6 +256,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
|
|
|
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
|
|
|
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
|
|
|
+#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
|
|
|
|
|
|
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
|
|
|
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
|
|
@@ -273,6 +271,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
|
|
|
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
|
|
|
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
|
|
|
+#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
|
|
|
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
|
|
|
|
|
|
#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
|
|
@@ -344,9 +343,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
} else if (vhcr->op_modifier == 0) {
|
|
|
struct mlx4_active_ports actv_ports =
|
|
|
mlx4_get_active_ports(dev, slave);
|
|
|
- /* enable rdma and ethernet interfaces, and new quota locations */
|
|
|
+ /* enable rdma and ethernet interfaces, new quota locations,
|
|
|
+ * and reserved lkey
|
|
|
+ */
|
|
|
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
|
|
|
- QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
|
|
|
+ QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
|
|
|
+ QUERY_FUNC_CAP_FLAG_RESD_LKEY);
|
|
|
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
|
|
|
|
|
|
field = min(
|
|
@@ -411,6 +413,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
|
|
|
QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
|
|
|
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
|
|
|
+
|
|
|
+ size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
|
|
|
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
|
|
|
} else
|
|
|
err = -EINVAL;
|
|
|
|
|
@@ -503,6 +508,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
|
|
|
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
|
|
|
func_cap->reserved_eq = size & 0xFFFFFF;
|
|
|
|
|
|
+ if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
|
|
|
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
|
|
|
+ func_cap->reserved_lkey = size;
|
|
|
+ } else {
|
|
|
+ func_cap->reserved_lkey = 0;
|
|
|
+ }
|
|
|
+
|
|
|
func_cap->extra_flags = 0;
|
|
|
|
|
|
/* Mailbox data from 0x6c and onward should only be treated if
|
|
@@ -859,6 +871,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
|
|
|
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
|
|
|
if (field32 & (1 << 0))
|
|
|
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
|
|
|
+ if (field32 & (1 << 7))
|
|
|
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
|
|
|
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
|
|
|
if (field & 1<<6)
|
|
|
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
|
|
@@ -1562,6 +1576,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|
|
#define INIT_HCA_VXLAN_OFFSET 0x0c
|
|
|
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
|
|
|
#define INIT_HCA_FLAGS_OFFSET 0x014
|
|
|
+#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
|
|
|
#define INIT_HCA_QPC_OFFSET 0x020
|
|
|
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
|
|
|
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
|
|
@@ -1668,6 +1683,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|
|
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
|
|
|
}
|
|
|
|
|
|
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
|
|
|
+ *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1U << 31);
|
|
|
+
|
|
|
/* QPC/EEC/CQC/EQC/RDMARC attributes */
|
|
|
|
|
|
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
|
|
@@ -1752,8 +1770,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|
|
MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
|
|
|
}
|
|
|
|
|
|
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
|
|
|
- MLX4_CMD_NATIVE);
|
|
|
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
|
|
|
+ MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
|
|
|
|
|
|
if (err)
|
|
|
mlx4_err(dev, "INIT_HCA returns %d\n", err);
|
|
@@ -1879,6 +1897,36 @@ out:
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
+static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
|
|
|
+{
|
|
|
+ struct mlx4_cmd_mailbox *mailbox;
|
|
|
+ __be32 *outbox;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
|
|
|
+ if (IS_ERR(mailbox)) {
|
|
|
+ mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
|
|
|
+ return PTR_ERR(mailbox);
|
|
|
+ }
|
|
|
+ outbox = mailbox->buf;
|
|
|
+
|
|
|
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
|
|
|
+ MLX4_CMD_QUERY_HCA,
|
|
|
+ MLX4_CMD_TIME_CLASS_B,
|
|
|
+ !mlx4_is_slave(dev));
|
|
|
+ if (err) {
|
|
|
+ mlx4_warn(dev, "hca_core_clock update failed\n");
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
|
|
|
+
|
|
|
+out:
|
|
|
+ mlx4_free_cmd_mailbox(dev, mailbox);
|
|
|
+
|
|
|
+ return err;
|
|
|
+}
|
|
|
+
|
|
|
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
|
|
|
* and real QP0 are active, so that the paravirtualized QP0 is ready
|
|
|
* to operate */
|
|
@@ -1983,6 +2031,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
|
|
|
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
|
|
|
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
|
|
|
|
|
+ if (!err)
|
|
|
+ mlx4_hca_core_clock_update(dev);
|
|
|
+
|
|
|
return err;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
|
|
@@ -2007,7 +2058,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
|
|
|
if (priv->mfunc.master.init_port_ref[port] == 1) {
|
|
|
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
|
|
|
- 1000, MLX4_CMD_NATIVE);
|
|
|
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
|
|
|
if (err)
|
|
|
return err;
|
|
|
}
|
|
@@ -2018,7 +2069,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
if (!priv->mfunc.master.qp0_state[port].qp0_active &&
|
|
|
priv->mfunc.master.qp0_state[port].port_active) {
|
|
|
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
|
|
|
- 1000, MLX4_CMD_NATIVE);
|
|
|
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
|
|
|
if (err)
|
|
|
return err;
|
|
|
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
|
|
@@ -2033,15 +2084,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
|
|
|
|
|
|
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
|
|
|
{
|
|
|
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
|
|
|
- MLX4_CMD_WRAPPED);
|
|
|
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
|
|
|
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
|
|
|
|
|
|
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
|
|
|
{
|
|
|
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
|
|
|
- MLX4_CMD_NATIVE);
|
|
|
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
|
|
|
+ MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
|
|
|
}
|
|
|
|
|
|
struct mlx4_config_dev {
|
|
@@ -2180,7 +2231,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
|
|
|
int mlx4_NOP(struct mlx4_dev *dev)
|
|
|
{
|
|
|
/* Input modifier of 0x1f means "finish as soon as possible." */
|
|
|
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
|
|
|
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
|
|
|
+ MLX4_CMD_NATIVE);
|
|
|
}
|
|
|
|
|
|
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
|