/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
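        /* VCN 1.0 exposes one decode ring and two encode rings */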
        adev->vcn.num_enc_rings = 2;

        vcn_v1_0_set_dec_ring_funcs(adev);
        vcn_v1_0_set_enc_ring_funcs(adev);
        vcn_v1_0_set_irq_funcs(adev);

        return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int i, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
        if (r)
                return r;
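        /*
         * The encode rings use IH source IDs 119 and 120, the decode ring
         * uses 124; these must match the switch in
         * vcn_v1_0_process_interrupt() below.
         */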
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
                                      &adev->vcn.irq);
                if (r)
                        return r;
        }

        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        ring = &adev->vcn.ring_dec;
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
        if (r)
                return r;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
                if (r)
                        return r;
        }

        return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vcn_sw_fini(adev);

        return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int i, r;

        r = vcn_v1_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
                ring->ready = true;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
                        goto done;
                }
        }

done:
        if (!r)
                DRM_INFO("VCN decode and encode initialized successfully.\n");

        return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        int r;

        r = vcn_v1_0_stop(adev);
        if (r)
                return r;

        ring->ready = false;

        return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vcn_v1_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vcn_suspend(adev);

        return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        r = vcn_v1_0_hw_init(adev);

        return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
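        /*
         * The VCPU sees three cache windows laid out back to back in the
         * VCN BO: the firmware image, then the heap, then the
         * stack/session area.
         */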
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                     lower_32_bits(adev->vcn.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                     upper_32_bits(adev->vcn.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                     lower_32_bits(adev->vcn.gpu_addr + size));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                     upper_32_bits(adev->vcn.gpu_addr + size));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                     lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                     upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
                     AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

        WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
                     adev->gfx.config.gb_addr_config);
        WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
                     adev->gfx.config.gb_addr_config);
        WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
                     adev->gfx.config.gb_addr_config);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw)
{
        uint32_t data;

        /* JPEG disable CGC */
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

        if (sw)
                data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

        data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
        data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

        /* UVD disable CGC */
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (sw)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
        data &= ~(UVD_CGC_GATE__SYS_MASK
                | UVD_CGC_GATE__UDEC_MASK
                | UVD_CGC_GATE__MPEG2_MASK
                | UVD_CGC_GATE__REGS_MASK
                | UVD_CGC_GATE__RBC_MASK
                | UVD_CGC_GATE__LMI_MC_MASK
                | UVD_CGC_GATE__LMI_UMC_MASK
                | UVD_CGC_GATE__IDCT_MASK
                | UVD_CGC_GATE__MPRD_MASK
                | UVD_CGC_GATE__MPC_MASK
                | UVD_CGC_GATE__LBSI_MASK
                | UVD_CGC_GATE__LRBBM_MASK
                | UVD_CGC_GATE__UDEC_RE_MASK
                | UVD_CGC_GATE__UDEC_CM_MASK
                | UVD_CGC_GATE__UDEC_IT_MASK
                | UVD_CGC_GATE__UDEC_DB_MASK
                | UVD_CGC_GATE__UDEC_MP_MASK
                | UVD_CGC_GATE__WCB_MASK
                | UVD_CGC_GATE__VCPU_MASK
                | UVD_CGC_GATE__SCPU_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        /* turn on */
        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
        data |= (UVD_SUVD_CGC_GATE__SRE_MASK
                | UVD_SUVD_CGC_GATE__SIT_MASK
                | UVD_SUVD_CGC_GATE__SMP_MASK
                | UVD_SUVD_CGC_GATE__SCM_MASK
                | UVD_SUVD_CGC_GATE__SDB_MASK
                | UVD_SUVD_CGC_GATE__SRE_H264_MASK
                | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SIT_H264_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCM_H264_MASK
                | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SDB_H264_MASK
                | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCLR_MASK
                | UVD_SUVD_CGC_GATE__UVD_SC_MASK
                | UVD_SUVD_CGC_GATE__ENT_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
                | UVD_SUVD_CGC_GATE__SITE_MASK
                | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
                | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
                | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
                | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
                | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @sw: enable SW clock gating
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw)
{
        uint32_t data = 0;

        /* enable JPEG CGC */
        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
        if (sw)
                data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
        data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
        WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

        /* enable UVD CGC */
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (sw)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;

        /* disable byte swapping */
        lmi_swap_cntl = 0;

        vcn_v1_0_mc_resume(adev);

        /* disable clock gating */
        vcn_v1_0_disable_clock_gating(adev, true);

        /* disable interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
                 ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                     UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                     UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* initialize VCN memory controller */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
                     (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                     UVD_LMI_CTRL__REQ_MODE_MASK |
                     0x00100000L);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
#endif
        WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
                     UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        /* boot up the VCPU */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
        mdelay(10);
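        /*
         * Poll UVD_STATUS for the VCPU-ready bit (checked via
         * "status & 2"); if it never comes up, soft-reset the VCPU and
         * retry, up to ten times.
         */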
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                         UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("VCN decode not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
                 (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
                 ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear the bit 4 of VCN_STATUS */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
                 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                     (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                     lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                     upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                     lower_32_bits(ring->wptr));
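        /* the ring is fully programmed; clear RB_NO_FETCH (set above) so
         * the RBC starts fetching commands again */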
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
                 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
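        /* program the two encode ring buffers */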
        ring = &adev->vcn.ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vcn.ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

        return 0;
}

/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
                 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);

        /* put VCPU into reset */
        WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
                     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        /* enable clock gating */
        vcn_v1_0_enable_clock_gating(adev, true);

        return 0;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        /* needed for driver unload */
        return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence flags (FLAG_64BIT is not supported)
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                         unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;

        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                                      struct amdgpu_ib *ib,
                                      unsigned vm_id, bool ctx_switch)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
        amdgpu_ring_write(ring, vm_id);

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
        amdgpu_ring_write(ring, ib->length_dw);
}
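/*
 * Emit a register write through the VCPU: DATA0 carries the register
 * byte offset, DATA1 the value to write.
 */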
static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
                                      uint32_t data0, uint32_t data1)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, data1);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
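/*
 * Emit a masked register wait through the VCPU: stall the ring until
 * (reg & mask) matches the expected value.
 */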
static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
                                     uint32_t data0, uint32_t data1, uint32_t mask)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
        amdgpu_ring_write(ring, data0);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
        amdgpu_ring_write(ring, data1);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
        amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                            unsigned vm_id, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        uint32_t data0, data1, mask;
        unsigned eng = ring->vm_inv_eng;

        pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
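        /*
         * Program the page directory address into this VMID's context
         * registers, then wait for the low word to read back so the
         * write has landed before the TLB flush is issued.
         */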
        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
        data1 = upper_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
        data1 = lower_32_bits(pd_addr);
        mask = 0xffffffff;
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);

        /* flush TLB */
        data0 = (hub->vm_inv_eng0_req + eng) << 2;
        data1 = req;
        vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

        /* wait for flush */
        data0 = (hub->vm_inv_eng0_ack + eng) << 2;
        data1 = 1 << vm_id;
        mask = 1 << vm_id;
        vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.ring_enc[0])
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
                             lower_32_bits(ring->wptr));
        else
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
                             lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence flags (FLAG_64BIT is not supported)
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
                struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
        amdgpu_ring_write(ring, vm_id);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                            unsigned int vm_id, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
        unsigned eng = ring->vm_inv_eng;

        pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
        pd_addr |= AMDGPU_PTE_VALID;
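        /*
         * Same sequence as the decode path: program the PD address, wait
         * for the write to land, then flush the TLB and wait for the ack.
         */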
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
                          (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, upper_32_bits(pd_addr));

        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring,
                          (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));

        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring,
                          (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, lower_32_bits(pd_addr));

        /* flush TLB */
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
        amdgpu_ring_write(ring, req);

        /* wait for flush */
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, 1 << vm_id);
}
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
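        /* no per-source interrupt state to program */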
        return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCN TRAP\n");

        switch (entry->src_id) {
        case 124:
                amdgpu_fence_process(&adev->vcn.ring_dec);
                break;
        case 119:
                amdgpu_fence_process(&adev->vcn.ring_enc[0]);
                break;
        case 120:
                amdgpu_fence_process(&adev->vcn.ring_enc[1]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}
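/**
 * vcn_v1_0_ring_insert_nop - insert NOP packets into the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of NOPs to insert
 */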
static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;
        struct amdgpu_device *adev = ring->adev;

        for (i = 0; i < count; i++)
                amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .name = "vcn_v1_0",
        .early_init = vcn_v1_0_early_init,
        .late_init = NULL,
        .sw_init = vcn_v1_0_sw_init,
        .sw_fini = vcn_v1_0_sw_fini,
        .hw_init = vcn_v1_0_hw_init,
        .hw_fini = vcn_v1_0_hw_fini,
        .suspend = vcn_v1_0_suspend,
        .resume = vcn_v1_0_resume,
        .is_idle = NULL /* vcn_v1_0_is_idle */,
        .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
        .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
        .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
        .soft_reset = NULL /* vcn_v1_0_soft_reset */,
        .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
        .set_clockgating_state = vcn_v1_0_set_clockgating_state,
        .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
        .nop = PACKET0(0x81ff, 0),
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
        .set_wptr = vcn_v1_0_dec_ring_set_wptr,
        .emit_frame_size =
                2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
                34 + /* vcn_v1_0_dec_ring_emit_vm_flush */
                14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
                6,
        .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
        .emit_ib = vcn_v1_0_dec_ring_emit_ib,
        .emit_fence = vcn_v1_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
        .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
        .test_ring = amdgpu_vcn_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v1_0_ring_insert_nop,
        .insert_start = vcn_v1_0_dec_ring_insert_start,
        .insert_end = vcn_v1_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_enc_ring_get_rptr,
        .get_wptr = vcn_v1_0_enc_ring_get_wptr,
        .set_wptr = vcn_v1_0_enc_ring_set_wptr,
        .emit_frame_size =
                17 + /* vcn_v1_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v1_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
        .emit_ib = vcn_v1_0_enc_ring_emit_ib,
        .emit_fence = vcn_v1_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v1_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
        adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

        DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
        .set = vcn_v1_0_set_interrupt_state,
        .process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_VCN,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &vcn_v1_0_ip_funcs,
};