@@ -74,6 +74,11 @@ struct mlx5_device_context {
 	void		       *context;
 };
 
+enum {
+	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
+	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
+};
+
 static struct mlx5_profile profile[] = {
 	[0] = {
 		.mask		= 0,
@@ -383,7 +388,7 @@ query_ex:
 	return err;
 }
 
-static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
+static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
 {
 	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
 	int err;
@@ -391,6 +396,7 @@ static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
 	memset(out, 0, sizeof(out));
 
 	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 	if (err)
 		return err;
@@ -400,6 +406,46 @@ static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
 	return err;
 }
 
+static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+{
+	void *set_ctx;
+	void *set_hca_cap;
+	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+	int req_endianness;
+	int err;
+
+	if (MLX5_CAP_GEN(dev, atomic)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+	} else {
+		return 0;
+	}
+
+	req_endianness =
+		MLX5_CAP_ATOMIC(dev,
+				supported_atomic_req_8B_endianess_mode_1);
+
+	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
+		return 0;
+
+	set_ctx = kzalloc(set_sz, GFP_KERNEL);
+	if (!set_ctx)
+		return -ENOMEM;
+
+	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+
+	/* Set requestor to host endianness */
+	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
+		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
+
+	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
+
+	kfree(set_ctx);
+	return err;
+}
+
 static int handle_hca_cap(struct mlx5_core_dev *dev)
 {
 	void *set_ctx = NULL;
@@ -441,7 +487,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
 
-	err = set_caps(dev, set_ctx, set_sz);
+	err = set_caps(dev, set_ctx, set_sz,
+		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
 
 query_ex:
 	kfree(set_ctx);
@@ -974,6 +1021,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		goto reclaim_boot_pages;
 	}
 
+	err = handle_hca_cap_atomic(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+		goto reclaim_boot_pages;
+	}
+
 	err = mlx5_satisfy_startup_pages(dev, 0);
 	if (err) {
 		dev_err(&pdev->dev, "failed to allocate init pages\n");