uvd_v5_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}
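
/**
 * uvd_v5_0_early_init - set ring and irq function pointers
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Install the ring buffer and interrupt handler callbacks for UVD
 */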
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}
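
/**
 * uvd_v5_0_sw_init - software init for UVD
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD trap interrupt, initialize the common UVD code
 * and set up the UVD ring
 */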
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}
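
/**
 * uvd_v5_0_sw_fini - software teardown for UVD
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend the UVD block and free the resources allocated in sw_init
 */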
static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}
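
/**
 * uvd_v5_0_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the hardware and suspend the common UVD state
 */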
static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}
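
/**
 * uvd_v5_0_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the common UVD state and re-run hardware init
 */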
static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write as the fence value
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory ID (unused by UVD)
 * @ctx_switch: whether a context switch is needed (unused by UVD)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
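
/**
 * uvd_v5_0_ring_get_emit_ib_size - worst case ring space for one IB
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the number of dwords uvd_v5_0_ring_emit_ib() writes to the ring
 */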
static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		6; /* uvd_v5_0_ring_emit_ib */
}
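
/**
 * uvd_v5_0_ring_get_dma_frame_size - worst case ring space per frame
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the number of dwords emitted per frame besides the IBs themselves
 */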
static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
}
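
/**
 * uvd_v5_0_is_idle - check whether the UVD block is idle
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true if the SRBM status reports UVD as not busy
 */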
static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
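
/**
 * uvd_v5_0_wait_for_idle - poll until the UVD block is idle
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns 0 once UVD is idle, -ETIMEDOUT if it stays busy
 */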
static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}
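
/**
 * uvd_v5_0_soft_reset - soft reset the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop UVD, reset it through the SRBM and start it again
 */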
static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}
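
/**
 * uvd_v5_0_set_interrupt_state - enable/disable a UVD interrupt source
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source to configure
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Not implemented for UVD 5.0; currently a no-op
 */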
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}
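
/**
 * uvd_v5_0_process_interrupt - process a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source that fired
 * @entry: interrupt vector entry
 *
 * Handle the UVD trap by processing the fences on the UVD ring
 */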
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}
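
/**
 * uvd_v5_0_set_sw_clock_gating - enable software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Put the UVD and SUVD sub-blocks into dynamic (software) clock gating mode
 */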
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32(mmUVD_CGC_CTRL);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);

	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_CGC_GATE, 0);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
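
/**
 * uvd_v5_0_set_clockgating_state - enable or disable UVD clock gating
 *
 * @handle: handle used to pass amdgpu_device pointer
 * @state: clock gating state to program
 *
 * Switch between software clock gating and no gating, if MGCG is supported
 */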
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	static int curstate = -1;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (curstate == state)
		return 0;

	curstate = state;
	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v5_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	} else {
		return uvd_v5_0_start(adev);
	}
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}