/* uvd_v1_0.c — Radeon UVD (Unified Video Decoder) v1.0 engine support */
  1. /*
  2. * Copyright 2013 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Christian König <christian.koenig@amd.com>
  23. */
  24. #include <drm/drmP.h>
  25. #include "radeon.h"
  26. #include "radeon_asic.h"
  27. #include "r600d.h"
  28. /**
  29. * uvd_v1_0_get_rptr - get read pointer
  30. *
  31. * @rdev: radeon_device pointer
  32. * @ring: radeon_ring pointer
  33. *
  34. * Returns the current hardware read pointer
  35. */
  36. uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
  37. struct radeon_ring *ring)
  38. {
  39. return RREG32(UVD_RBC_RB_RPTR);
  40. }
  41. /**
  42. * uvd_v1_0_get_wptr - get write pointer
  43. *
  44. * @rdev: radeon_device pointer
  45. * @ring: radeon_ring pointer
  46. *
  47. * Returns the current hardware write pointer
  48. */
  49. uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
  50. struct radeon_ring *ring)
  51. {
  52. return RREG32(UVD_RBC_RB_WPTR);
  53. }
  54. /**
  55. * uvd_v1_0_set_wptr - set write pointer
  56. *
  57. * @rdev: radeon_device pointer
  58. * @ring: radeon_ring pointer
  59. *
  60. * Commits the write pointer to the hardware
  61. */
  62. void uvd_v1_0_set_wptr(struct radeon_device *rdev,
  63. struct radeon_ring *ring)
  64. {
  65. WREG32(UVD_RBC_RB_WPTR, ring->wptr);
  66. }
  67. /**
  68. * uvd_v1_0_init - start and test UVD block
  69. *
  70. * @rdev: radeon_device pointer
  71. *
  72. * Initialize the hardware, boot up the VCPU and do some testing
  73. */
  74. int uvd_v1_0_init(struct radeon_device *rdev)
  75. {
  76. struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  77. uint32_t tmp;
  78. int r;
  79. /* raise clocks while booting up the VCPU */
  80. if (rdev->family < CHIP_RV740)
  81. radeon_set_uvd_clocks(rdev, 10000, 10000);
  82. else
  83. radeon_set_uvd_clocks(rdev, 53300, 40000);
  84. r = uvd_v1_0_start(rdev);
  85. if (r)
  86. goto done;
  87. ring->ready = true;
  88. r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
  89. if (r) {
  90. ring->ready = false;
  91. goto done;
  92. }
  93. r = radeon_ring_lock(rdev, ring, 10);
  94. if (r) {
  95. DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
  96. goto done;
  97. }
  98. tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
  99. radeon_ring_write(ring, tmp);
  100. radeon_ring_write(ring, 0xFFFFF);
  101. tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
  102. radeon_ring_write(ring, tmp);
  103. radeon_ring_write(ring, 0xFFFFF);
  104. tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
  105. radeon_ring_write(ring, tmp);
  106. radeon_ring_write(ring, 0xFFFFF);
  107. /* Clear timeout status bits */
  108. radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
  109. radeon_ring_write(ring, 0x8);
  110. radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
  111. radeon_ring_write(ring, 3);
  112. radeon_ring_unlock_commit(rdev, ring);
  113. done:
  114. /* lower clocks again */
  115. radeon_set_uvd_clocks(rdev, 0, 0);
  116. if (!r)
  117. DRM_INFO("UVD initialized successfully.\n");
  118. return r;
  119. }
  120. /**
  121. * uvd_v1_0_fini - stop the hardware block
  122. *
  123. * @rdev: radeon_device pointer
  124. *
  125. * Stop the UVD block, mark ring as not ready any more
  126. */
  127. void uvd_v1_0_fini(struct radeon_device *rdev)
  128. {
  129. struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  130. uvd_v1_0_stop(rdev);
  131. ring->ready = false;
  132. }
  133. /**
  134. * uvd_v1_0_start - start UVD block
  135. *
  136. * @rdev: radeon_device pointer
  137. *
  138. * Setup and start the UVD block
  139. */
  140. int uvd_v1_0_start(struct radeon_device *rdev)
  141. {
  142. struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  143. uint32_t rb_bufsz;
  144. int i, j, r;
  145. /* disable byte swapping */
  146. u32 lmi_swap_cntl = 0;
  147. u32 mp_swap_cntl = 0;
  148. /* disable clock gating */
  149. WREG32(UVD_CGC_GATE, 0);
  150. /* disable interupt */
  151. WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
  152. /* Stall UMC and register bus before resetting VCPU */
  153. WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
  154. WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
  155. mdelay(1);
  156. /* put LMI, VCPU, RBC etc... into reset */
  157. WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
  158. LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
  159. CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
  160. mdelay(5);
  161. /* take UVD block out of reset */
  162. WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
  163. mdelay(5);
  164. /* initialize UVD memory controller */
  165. WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
  166. (1 << 21) | (1 << 9) | (1 << 20));
  167. #ifdef __BIG_ENDIAN
  168. /* swap (8 in 32) RB and IB */
  169. lmi_swap_cntl = 0xa;
  170. mp_swap_cntl = 0;
  171. #endif
  172. WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
  173. WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
  174. WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
  175. WREG32(UVD_MPC_SET_MUXA1, 0x0);
  176. WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
  177. WREG32(UVD_MPC_SET_MUXB1, 0x0);
  178. WREG32(UVD_MPC_SET_ALU, 0);
  179. WREG32(UVD_MPC_SET_MUX, 0x88);
  180. /* take all subblocks out of reset, except VCPU */
  181. WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
  182. mdelay(5);
  183. /* enable VCPU clock */
  184. WREG32(UVD_VCPU_CNTL, 1 << 9);
  185. /* enable UMC */
  186. WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
  187. /* boot up the VCPU */
  188. WREG32(UVD_SOFT_RESET, 0);
  189. mdelay(10);
  190. WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
  191. for (i = 0; i < 10; ++i) {
  192. uint32_t status;
  193. for (j = 0; j < 100; ++j) {
  194. status = RREG32(UVD_STATUS);
  195. if (status & 2)
  196. break;
  197. mdelay(10);
  198. }
  199. r = 0;
  200. if (status & 2)
  201. break;
  202. DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
  203. WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
  204. mdelay(10);
  205. WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
  206. mdelay(10);
  207. r = -1;
  208. }
  209. if (r) {
  210. DRM_ERROR("UVD not responding, giving up!!!\n");
  211. return r;
  212. }
  213. /* enable interupt */
  214. WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
  215. /* force RBC into idle state */
  216. WREG32(UVD_RBC_RB_CNTL, 0x11010101);
  217. /* Set the write pointer delay */
  218. WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
  219. /* programm the 4GB memory segment for rptr and ring buffer */
  220. WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
  221. (0x7 << 16) | (0x1 << 31));
  222. /* Initialize the ring buffer's read and write pointers */
  223. WREG32(UVD_RBC_RB_RPTR, 0x0);
  224. ring->wptr = RREG32(UVD_RBC_RB_RPTR);
  225. WREG32(UVD_RBC_RB_WPTR, ring->wptr);
  226. /* set the ring address */
  227. WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
  228. /* Set ring buffer size */
  229. rb_bufsz = order_base_2(ring->ring_size);
  230. rb_bufsz = (0x1 << 8) | rb_bufsz;
  231. WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
  232. return 0;
  233. }
  234. /**
  235. * uvd_v1_0_stop - stop UVD block
  236. *
  237. * @rdev: radeon_device pointer
  238. *
  239. * stop the UVD block
  240. */
  241. void uvd_v1_0_stop(struct radeon_device *rdev)
  242. {
  243. /* force RBC into idle state */
  244. WREG32(UVD_RBC_RB_CNTL, 0x11010101);
  245. /* Stall UMC and register bus before resetting VCPU */
  246. WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
  247. WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
  248. mdelay(1);
  249. /* put VCPU into reset */
  250. WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
  251. mdelay(5);
  252. /* disable VCPU clock */
  253. WREG32(UVD_VCPU_CNTL, 0x0);
  254. /* Unstall UMC and register bus */
  255. WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
  256. WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
  257. }
  258. /**
  259. * uvd_v1_0_ring_test - register write test
  260. *
  261. * @rdev: radeon_device pointer
  262. * @ring: radeon_ring pointer
  263. *
  264. * Test if we can successfully write to the context register
  265. */
  266. int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  267. {
  268. uint32_t tmp = 0;
  269. unsigned i;
  270. int r;
  271. WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
  272. r = radeon_ring_lock(rdev, ring, 3);
  273. if (r) {
  274. DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
  275. ring->idx, r);
  276. return r;
  277. }
  278. radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
  279. radeon_ring_write(ring, 0xDEADBEEF);
  280. radeon_ring_unlock_commit(rdev, ring);
  281. for (i = 0; i < rdev->usec_timeout; i++) {
  282. tmp = RREG32(UVD_CONTEXT_ID);
  283. if (tmp == 0xDEADBEEF)
  284. break;
  285. DRM_UDELAY(1);
  286. }
  287. if (i < rdev->usec_timeout) {
  288. DRM_INFO("ring test on %d succeeded in %d usecs\n",
  289. ring->idx, i);
  290. } else {
  291. DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
  292. ring->idx, tmp);
  293. r = -EINVAL;
  294. }
  295. return r;
  296. }
  297. /**
  298. * uvd_v1_0_semaphore_emit - emit semaphore command
  299. *
  300. * @rdev: radeon_device pointer
  301. * @ring: radeon_ring pointer
  302. * @semaphore: semaphore to emit commands for
  303. * @emit_wait: true if we should emit a wait command
  304. *
  305. * Emit a semaphore command (either wait or signal) to the UVD ring.
  306. */
  307. bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
  308. struct radeon_ring *ring,
  309. struct radeon_semaphore *semaphore,
  310. bool emit_wait)
  311. {
  312. uint64_t addr = semaphore->gpu_addr;
  313. radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
  314. radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
  315. radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
  316. radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
  317. radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
  318. radeon_ring_write(ring, emit_wait ? 1 : 0);
  319. return true;
  320. }
  321. /**
  322. * uvd_v1_0_ib_execute - execute indirect buffer
  323. *
  324. * @rdev: radeon_device pointer
  325. * @ib: indirect buffer to execute
  326. *
  327. * Write ring commands to execute the indirect buffer
  328. */
  329. void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  330. {
  331. struct radeon_ring *ring = &rdev->ring[ib->ring];
  332. radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
  333. radeon_ring_write(ring, ib->gpu_addr);
  334. radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
  335. radeon_ring_write(ring, ib->length_dw);
  336. }
  337. /**
  338. * uvd_v1_0_ib_test - test ib execution
  339. *
  340. * @rdev: radeon_device pointer
  341. * @ring: radeon_ring pointer
  342. *
  343. * Test if we can successfully execute an IB
  344. */
  345. int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
  346. {
  347. struct radeon_fence *fence = NULL;
  348. int r;
  349. if (rdev->family < CHIP_RV740)
  350. r = radeon_set_uvd_clocks(rdev, 10000, 10000);
  351. else
  352. r = radeon_set_uvd_clocks(rdev, 53300, 40000);
  353. if (r) {
  354. DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
  355. return r;
  356. }
  357. r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
  358. if (r) {
  359. DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
  360. goto error;
  361. }
  362. r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
  363. if (r) {
  364. DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
  365. goto error;
  366. }
  367. r = radeon_fence_wait(fence, false);
  368. if (r) {
  369. DRM_ERROR("radeon: fence wait failed (%d).\n", r);
  370. goto error;
  371. }
  372. DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
  373. error:
  374. radeon_fence_unref(&fence);
  375. radeon_set_uvd_clocks(rdev, 0, 0);
  376. return r;
  377. }