/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_RPTR2);
	else
		return RREG32(mmVCE_RB_RPTR3);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_WPTR2);
	else
		return RREG32(mmVCE_RB_WPTR3);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else if (ring == &adev->vce.ring[1])
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR3, ring->wptr);
}
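
/**
 * vce_v3_0_override_vce_clock_gating - set the VCE CGTT override bit
 *
 * @adev: amdgpu_device pointer
 * @override: true to override (hold off) clock gating, false to hand
 * control back to the clock gating logic
 */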
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
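
/**
 * vce_v3_0_set_vce_sw_clock_gating - program VCE medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: true to put the clocks into the gated (throttled) state
 */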
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}

	vce_v3_0_override_vce_clock_gating(adev, false);
}
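
/**
 * vce_v3_0_firmware_loaded - wait for the VCPU to report the firmware loaded
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS, resetting the ECPU between attempts.
 * Returns 0 once the firmware reports loaded, -ETIMEDOUT otherwise.
 */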
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, ring->wptr);
	WREG32(mmVCE_RB_WPTR3, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
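
/**
 * vce_v3_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Gates the clocks and holds the ECPU in reset on each unharvested
 * VCE instance.
 */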
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000
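
/**
 * vce_v3_0_get_harvest_config - determine which VCE instances are harvested
 *
 * @adev: amdgpu_device pointer
 *
 * Returns a mask of AMDGPU_VCE_HARVEST_VCE0/VCE1 bits derived from the
 * ASIC type or from the harvest fuses.
 */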
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}
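
/**
 * vce_v3_0_early_init - detect harvesting and set up ring/irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Returns -ENOENT if both VCE instances are harvested.
 */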
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}
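
/**
 * vce_v3_0_sw_init - software init: irq source, firmware BO and rings
 *
 * @handle: amdgpu_device pointer
 */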
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
		if (r)
			return r;
	}

	return r;
}
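
/**
 * vce_v3_0_sw_fini - software teardown, the reverse of sw_init
 *
 * @handle: amdgpu_device pointer
 */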
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
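
/**
 * vce_v3_0_hw_init - start the VCE block and test the rings
 *
 * @handle: amdgpu_device pointer
 */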
static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}
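
/**
 * vce_v3_0_hw_fini - wait for idle and stop the VCE block
 *
 * @handle: amdgpu_device pointer
 */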
static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
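
/**
 * vce_v3_0_mc_resume - program the memory controller registers
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance to program
 *
 * Points the VCPU caches at the firmware, stack and data regions of the
 * VCE BO for the given instance.
 */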
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
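
/**
 * vce_v3_0_is_idle - check whether the unharvested VCE instances are idle
 *
 * @handle: amdgpu_device pointer
 */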
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for the 1st instance, 0x10 for the 2nd instance).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3 to 6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
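
/**
 * vce_v3_0_soft_reset - assert and release the SRBM soft reset for VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Uses the reset mask cached by vce_v3_0_check_soft_reset().
 */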
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}
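
/**
 * vce_v3_0_set_bypass_mode - toggle the ECLK DFS bypass
 *
 * @adev: amdgpu_device pointer
 * @enable: true to bypass the encoder clock DFS, false to use it
 */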
static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v3_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		5; /* vce_v3_0_ring_emit_ib */
}

static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		4 + /* vce_v3_0_emit_pipeline_sync */
		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}

static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
	return
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}