i915_query.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
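
/*
 * Describe the GPU slice/subslice/EU topology to userspace.
 *
 * The query writes a struct drm_i915_query_topology_info followed by three
 * sets of bitmasks: the slice mask at the start of the data that follows the
 * struct, then one subslice mask per slice (subslice_stride bytes each,
 * starting at subslice_offset), then one EU mask per subslice (eu_stride
 * bytes each, starting at eu_offset). A query_item->length of 0 only reports
 * the total buffer size required.
 */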
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices *
		DIV_ROUND_UP(sseu->max_subslices,
			     sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
	eu_length = sseu->max_slices * sseu->max_subslices *
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	total_length = sizeof(topo) + slice_length + subslice_length + eu_length;

	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
			   sizeof(topo)))
		return -EFAULT;

	if (topo.flags != 0)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
		       total_length))
		return -EFAULT;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride =
		DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			   &topo, sizeof(topo)))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			   &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) + slice_length),
			   sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					   sizeof(topo) +
					   slice_length + subslice_length),
			   sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
};
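
/*
 * Dispatch an array of query items. Each item's query_id (1-based) indexes
 * into i915_query_funcs; the handler's return value (the total size written
 * on success, or a negative errno) is stored back into the item's length
 * field whenever it differs from the value userspace passed in.
 */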
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		u64 func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		func_idx = item.query_id - 1;

		if (func_idx < ARRAY_SIZE(i915_query_funcs))
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		else
			ret = -EINVAL;

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
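
For reference, a minimal userspace sketch (not part of this file) of the two-pass pattern the ioctl expects: issue the topology query once with length 0 to learn the required buffer size, allocate, query again, then decode the masks via the offsets and strides reported in the header. The probe_topology() helper name is hypothetical, the DRM fd is assumed to be already open, and drmIoctl() comes from libdrm; error handling is kept trivial.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: query and print the slice/subslice topology. */
static int probe_topology(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_topology_info *topo;
	int s, ss;

	/* First pass: length == 0 asks the kernel for the buffer size. */
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return -1;

	topo = calloc(1, item.length);
	if (!topo)
		return -1;

	/* Second pass: the kernel fills the header and the bitmasks. */
	item.data_ptr = (uintptr_t)topo;
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) {
		free(topo);
		return -1;
	}

	for (s = 0; s < topo->max_slices; s++) {
		/* The slice mask sits at the start of the data[] area. */
		if (!(topo->data[s / 8] & (1 << (s % 8))))
			continue;

		for (ss = 0; ss < topo->max_subslices; ss++) {
			const uint8_t *mask = topo->data + topo->subslice_offset +
					      s * topo->subslice_stride;

			if (mask[ss / 8] & (1 << (ss % 8)))
				printf("slice %d subslice %d enabled\n", s, ss);
		}
	}

	free(topo);
	return 0;
}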