amdgpu_queue_mgr.c

/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */

#include "amdgpu.h"
#include "amdgpu_ring.h"
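
/*
 * For reference, a sketch of the structures this file operates on, as
 * implied by their usage below. The authoritative definitions live in
 * amdgpu.h; exact field order and additional members may differ.
 *
 *	struct amdgpu_queue_mapper {
 *		int hw_ip;
 *		struct mutex lock;
 *		struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];  // guarded by lock
 *	};
 *
 *	struct amdgpu_queue_mgr {
 *		struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
 *	};
 */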

static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
                                    int hw_ip)
{
        if (!mapper)
                return -EINVAL;

        /* hw_ip indexes fixed-size arrays, so it must be strictly in range */
        if (hw_ip >= AMDGPU_MAX_IP_NUM)
                return -EINVAL;

        mapper->hw_ip = hw_ip;
        mutex_init(&mapper->lock);

        memset(mapper->queue_map, 0, sizeof(mapper->queue_map));

        return 0;
}

static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
                                                 int ring)
{
        return mapper->queue_map[ring];
}

static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
                                    int ring, struct amdgpu_ring *pring)
{
        if (WARN_ON(mapper->queue_map[ring])) {
                DRM_ERROR("Unexpected ring re-map\n");
                return -EINVAL;
        }

        mapper->queue_map[ring] = pring;

        return 0;
}
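
/*
 * Identity mapping: user ring i resolves directly to the i-th kernel ring
 * of the IP block, e.g. user GFX ring 0 always maps to
 * &adev->gfx.gfx_ring[0]. Single-ring IPs (VCN_DEC, VCN_JPEG) and the
 * first UVD instance ignore the ring index beyond bounds checking.
 */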
static int amdgpu_identity_map(struct amdgpu_device *adev,
                               struct amdgpu_queue_mapper *mapper,
                               u32 ring,
                               struct amdgpu_ring **out_ring)
{
        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
                *out_ring = &adev->gfx.gfx_ring[ring];
                break;
        case AMDGPU_HW_IP_COMPUTE:
                *out_ring = &adev->gfx.compute_ring[ring];
                break;
        case AMDGPU_HW_IP_DMA:
                *out_ring = &adev->sdma.instance[ring].ring;
                break;
        case AMDGPU_HW_IP_UVD:
                *out_ring = &adev->uvd.inst[0].ring;
                break;
        case AMDGPU_HW_IP_VCE:
                *out_ring = &adev->vce.ring[ring];
                break;
        case AMDGPU_HW_IP_UVD_ENC:
                *out_ring = &adev->uvd.inst[0].ring_enc[ring];
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                *out_ring = &adev->vcn.ring_dec;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                *out_ring = &adev->vcn.ring_enc[ring];
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                *out_ring = &adev->vcn.ring_jpeg;
                break;
        default:
                *out_ring = NULL;
                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
                return -EINVAL;
        }

        return amdgpu_update_cached_map(mapper, ring, *out_ring);
}

static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
{
        switch (hw_ip) {
        case AMDGPU_HW_IP_GFX:
                return AMDGPU_RING_TYPE_GFX;
        case AMDGPU_HW_IP_COMPUTE:
                return AMDGPU_RING_TYPE_COMPUTE;
        case AMDGPU_HW_IP_DMA:
                return AMDGPU_RING_TYPE_SDMA;
        case AMDGPU_HW_IP_UVD:
                return AMDGPU_RING_TYPE_UVD;
        case AMDGPU_HW_IP_VCE:
                return AMDGPU_RING_TYPE_VCE;
        default:
                DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
                return -1;
        }
}
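
/*
 * LRU mapping: hand out the least-recently-used kernel ring of the
 * requested type, skipping rings this mapper has already assigned (the
 * blacklist below), so that two distinct user ring ids within one context
 * never alias the same kernel ring.
 */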
static int amdgpu_lru_map(struct amdgpu_device *adev,
                          struct amdgpu_queue_mapper *mapper,
                          u32 user_ring, bool lru_pipe_order,
                          struct amdgpu_ring **out_ring)
{
        int r, i, j;
        int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
        int ring_blacklist[AMDGPU_MAX_RINGS];
        struct amdgpu_ring *ring;

        /* 0 is a valid ring index, so initialize to -1 */
        memset(ring_blacklist, 0xff, sizeof(ring_blacklist));

        for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
                ring = mapper->queue_map[i];
                if (ring)
                        ring_blacklist[j++] = ring->idx;
        }

        r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
                                j, lru_pipe_order, out_ring);
        if (r)
                return r;

        return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
}

/**
 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * Initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr)
{
        int i, r;

        if (!adev || !mgr)
                return -EINVAL;

        memset(mgr, 0, sizeof(*mgr));

        for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
                r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr)
{
        /* nothing to tear down: the mappers hold no dynamic allocations */
        return 0;
}
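
/*
 * Lifecycle note (context from outside this file, stated with some
 * hedging): in kernels of this era the queue manager is embedded in
 * struct amdgpu_ctx, and these init/fini hooks are invoked from the
 * context create/free paths, so each open context gets an independent
 * user-ring-to-kernel-ring mapping table.
 */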

/**
 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 * @hw_ip: HW IP enum
 * @instance: HW instance
 * @ring: user ring id
 * @out_ring: pointer to mapped amdgpu_ring
 *
 * Map a userspace ring id to an appropriate kernel ring. Different
 * policies are configurable at a HW IP level.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
                         u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
{
        int i, r, ip_num_rings = 0;
        struct amdgpu_queue_mapper *mapper;

        if (!adev || !mgr || !out_ring)
                return -EINVAL;

        if (hw_ip >= AMDGPU_MAX_IP_NUM)
                return -EINVAL;

        if (ring >= AMDGPU_MAX_RINGS)
                return -EINVAL;

        /* only index mgr->mapper[] once hw_ip has been validated */
        mapper = &mgr->mapper[hw_ip];

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        switch (hw_ip) {
        case AMDGPU_HW_IP_GFX:
                ip_num_rings = adev->gfx.num_gfx_rings;
                break;
        case AMDGPU_HW_IP_COMPUTE:
                ip_num_rings = adev->gfx.num_compute_rings;
                break;
        case AMDGPU_HW_IP_DMA:
                ip_num_rings = adev->sdma.num_instances;
                break;
        case AMDGPU_HW_IP_UVD:
                for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
                        if (!(adev->uvd.harvest_config & (1 << i)))
                                ip_num_rings++;
                }
                break;
        case AMDGPU_HW_IP_VCE:
                ip_num_rings = adev->vce.num_rings;
                break;
        case AMDGPU_HW_IP_UVD_ENC:
                for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
                        if (!(adev->uvd.harvest_config & (1 << i)))
                                ip_num_rings++;
                }
                ip_num_rings = adev->uvd.num_enc_rings * ip_num_rings;
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                ip_num_rings = 1;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                ip_num_rings = adev->vcn.num_enc_rings;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                ip_num_rings = 1;
                break;
        default:
                DRM_DEBUG("unknown ip type: %d\n", hw_ip);
                return -EINVAL;
        }

        if (ring >= ip_num_rings) {
                DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
                          ring, ip_num_rings, hw_ip);
                return -EINVAL;
        }

        mutex_lock(&mapper->lock);

        *out_ring = amdgpu_get_cached_map(mapper, ring);
        if (*out_ring) {
                /* cache hit */
                r = 0;
                goto out_unlock;
        }

        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
        case AMDGPU_HW_IP_UVD:
        case AMDGPU_HW_IP_VCE:
        case AMDGPU_HW_IP_UVD_ENC:
        case AMDGPU_HW_IP_VCN_DEC:
        case AMDGPU_HW_IP_VCN_ENC:
        case AMDGPU_HW_IP_VCN_JPEG:
                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
                break;
        case AMDGPU_HW_IP_DMA:
                r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
                break;
        case AMDGPU_HW_IP_COMPUTE:
                r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
                break;
        default:
                *out_ring = NULL;
                r = -EINVAL;
                DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
        }

out_unlock:
        mutex_unlock(&mapper->lock);
        return r;
}
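
/*
 * Example (hypothetical, for illustration only): how a caller such as the
 * command-submission path might resolve a userspace ring id before queuing
 * work. This helper is not part of the original file; in the kernel the
 * analogous lookup happens in amdgpu_cs.c using the per-context queue
 * manager.
 */
static int example_resolve_user_ring(struct amdgpu_device *adev,
                                     struct amdgpu_queue_mgr *queue_mgr,
                                     u32 ip_type, u32 user_ring)
{
        struct amdgpu_ring *ring;
        int r;

        /* instance must be 0 for now; see amdgpu_queue_mgr_map() above */
        r = amdgpu_queue_mgr_map(adev, queue_mgr, ip_type, 0,
                                 user_ring, &ring);
        if (r) {
                DRM_DEBUG("could not map user ring %u for ip %u\n",
                          user_ring, ip_type);
                return r;
        }

        /* repeated lookups of the same (ip_type, user_ring) pair now hit
         * the cache and always return the same kernel ring */
        DRM_DEBUG("user ring %u -> kernel ring %u\n", user_ring, ring->idx);
        return 0;
}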