@@ -795,8 +795,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req_v2 req;
-	struct mlx5_ib_alloc_ucontext_resp resp;
+	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
+	struct mlx5_ib_alloc_ucontext_resp resp = {};
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
@@ -811,20 +811,19 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	memset(&req, 0, sizeof(req));
 	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
 		ver = 0;
-	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+	else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
 		ver = 2;
 	else
 		return ERR_PTR(-EINVAL);
 
-	err = ib_copy_from_udata(&req, udata, reqlen);
+	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
 	if (err)
 		return ERR_PTR(err);
 
-	if (req.flags || req.reserved)
+	if (req.flags)
 		return ERR_PTR(-EINVAL);
 
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
@@ -833,6 +832,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (req.total_num_uuars == 0)
 		return ERR_PTR(-EINVAL);
 
+	if (req.comp_mask)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (reqlen > sizeof(req) &&
+	    !ib_is_udata_cleared(udata, sizeof(req),
+				 udata->inlen - sizeof(req)))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	req.total_num_uuars = ALIGN(req.total_num_uuars,
 				    MLX5_NON_FP_BF_REGS_PER_PAGE);
 	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
@@ -848,6 +855,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+	resp.response_length = min(offsetof(typeof(resp), response_length) +
+				   sizeof(resp.response_length), udata->outlen);
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -898,8 +907,20 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	resp.tot_uuars = req.total_num_uuars;
 	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
-	err = ib_copy_to_udata(udata, &resp,
-			       sizeof(resp) - sizeof(resp.reserved));
+
+	if (field_avail(typeof(resp), reserved2, udata->outlen))
+		resp.response_length += sizeof(resp.reserved2);
+
+	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+		resp.comp_mask |=
+			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+		resp.hca_core_clock_offset =
+			offsetof(struct mlx5_init_seg, internal_timer_h) %
+			PAGE_SIZE;
+		resp.response_length += sizeof(resp.hca_core_clock_offset);
+	}
+
+	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
 		goto out_uars;
 
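For reference: the last hunk keys every optional output field off udata->outlen via field_avail(), growing resp.response_length as each field is confirmed to fit, so older userspace that allocates a shorter response struct keeps working. A minimal sketch of such a helper, assuming the in-tree macro follows the usual offsetof/sizeof shape (the exact definition lives elsewhere in main.c and may differ):

/* Sketch only: true if the caller's buffer (sz bytes) is long enough
 * to reach and fully contain struct member 'fld' of 'type'. */
#define field_avail(type, fld, sz) \
	(offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

The same idea runs in the other direction on input: accepting reqlen >= sizeof(req) and then requiring ib_is_udata_cleared() on the tail lets newer userspace pass a larger request, as long as every byte past the fields this kernel understands is zero.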