vcn_v1_0.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"
#include "raven1/VCN/vcn_1_0_sh_mask.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "raven1/MMHUB/mmhub_9_1_offset.h"
#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
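
/*
 * VCN 1.0 is the video codec block on Raven-class parts (see the raven1
 * register headers above).  This file wires it into the amdgpu IP
 * framework: sw/hw init and fini, suspend/resume, and the start/stop
 * sequences that boot and halt the VCPU.
 */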
static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_resume(adev);

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int r;

	r = vcn_v1_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode initialized successfully.\n");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int r;

	r = vcn_v1_0_stop(adev);
	if (r)
		return r;

	ring->ready = false;

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.gpu_addr));
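
	/*
	 * The VCPU address space is carved into three cache windows laid
	 * out back to back: the firmware image (window 0), the heap
	 * (window 1) and the stack/session area (window 2).  The OFFSET
	 * registers take the byte offset shifted right by 3, i.e. in
	 * 8-byte units.
	 */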
	/* Current FW has no signed header, but will be added later on */
	/* offset = AMDGPU_VCN_FIRMWARE_OFFSET; */
	offset = 0;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);

	offset += size;
	size = AMDGPU_VCN_HEAP_SIZE;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), size);

	offset += size;
	size = AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), size);

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	/* disable clock gating */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
			~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
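
	/* program the MPC set mux and ALU registers */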
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
	mdelay(10);
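
	/*
	 * Wait for the VCPU to report that it has booted: poll UVD_STATUS
	 * for the ready bit, and if it never shows up, pulse the VCPU soft
	 * reset and try again, giving up after 10 attempts.
	 */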
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	return 0;
}

/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	return 0;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = NULL /* vcn_v1_0_is_idle */,
	.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
};