mmhub_v1_0.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0

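/* Return the VRAM base address programmed in MC_VM_FB_LOCATION_BASE, in bytes. */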
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
        u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);

        base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
        base <<= 24;

        return base;
}

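/* Program the context0 (GART) page table base address. */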
static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
{
        uint64_t value;

        BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
        value = adev->gart.table_addr - adev->mc.vram_start +
                adev->vm_manager.vram_base_offset;
        value &= 0x0000FFFFFFFFF000ULL;
        value |= 0x1; /* valid bit */

        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                     lower_32_bits(value));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                     upper_32_bits(value));
}

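/* Program the context0 page table base and the GART aperture start/end addresses. */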
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
        mmhub_v1_0_init_gart_pt_regs(adev);

        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->mc.gtt_start >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
                     (u32)(adev->mc.gtt_start >> 44));

        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
                     (u32)(adev->mc.gtt_end >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
                     (u32)(adev->mc.gtt_end >> 44));
}

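/*
 * Disable the AGP aperture and program the system aperture, the default
 * page address and the protection fault default address.
 */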
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
        uint64_t value;
        uint32_t tmp;

        /* Disable AGP. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF);

        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     adev->mc.vram_start >> 18);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                     adev->mc.vram_end >> 18);

        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
                adev->vm_manager.vram_base_offset;
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                     (u32)(value >> 12));
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                     (u32)(value >> 44));

        /* Program "protection fault". */
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
                     (u32)(adev->dummy_page.addr >> 12));
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
                     (u32)((u64)adev->dummy_page.addr >> 44));

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
                            ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

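/* Set up the MMHUB L1 TLB control register. */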
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);

        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}

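/* Set up the VM L2 cache control registers (VM_L2_CNTL..VM_L2_CNTL4). */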
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
        uint32_t tmp;

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
        /* XXX for emulation, Refer to closed source code.*/
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                            0);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);

        tmp = mmVM_L2_CNTL3_DEFAULT;
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);

        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
}

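/* Enable VM context0 (the system/GART domain) with a one-level page table. */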
static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
        uint32_t tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}

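/*
 * Disable the context1 identity aperture by programming an empty range
 * (low address above high address).
 */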
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                     0XFFFFFFFF);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
                     0x0000000F);

        WREG32_SOC15(MMHUB, 0,
                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
        WREG32_SOC15(MMHUB, 0,
                     mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
                     0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
                     0);
}

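/*
 * Set up VM contexts 1-15 (per-process GPUVM): enable them, program the
 * page table depth, block size and address range, and turn on the default
 * protection fault responses.
 */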
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
        int i;
        uint32_t tmp;

        for (i = 0; i <= 14; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    ENABLE_CONTEXT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_BLOCK_SIZE,
                                    adev->vm_manager.block_size - 9);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
                                    lower_32_bits(adev->vm_manager.max_pfn - 1));
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
                                    upper_32_bits(adev->vm_manager.max_pfn - 1));
        }
}

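/* Open the address range of all 18 VM invalidation engines to the full VM space. */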
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
        unsigned i;

        for (i = 0; i < 18; ++i) {
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                                    2 * i, 0xffffffff);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                                    2 * i, 0x1f);
        }
}

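/**
 * mmhub_v1_0_gart_enable - program the MMHUB for GART/VM operation
 *
 * @adev: amdgpu_device pointer
 */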
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev)) {
                /*
                 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
                 * VF copy registers which the vbios post does not program,
                 * so the SRIOV driver needs to program them itself.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
                             adev->mc.vram_start >> 24);
                WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
                             adev->mc.vram_end >> 24);
        }

        /* GART Enable. */
        mmhub_v1_0_init_gart_aperture_regs(adev);
        mmhub_v1_0_init_system_aperture_regs(adev);
        mmhub_v1_0_init_tlb_regs(adev);
        mmhub_v1_0_init_cache_regs(adev);

        mmhub_v1_0_enable_system_domain(adev);
        mmhub_v1_0_disable_identity_aperture(adev);
        mmhub_v1_0_setup_vmid_config(adev);
        mmhub_v1_0_program_invalidation(adev);

        return 0;
}

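/**
 * mmhub_v1_0_gart_disable - disable the VM contexts, L1 TLB and L2 cache
 *
 * @adev: amdgpu_device pointer
 */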
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
        u32 tmp;
        u32 i;

        /* Disable all tables */
        for (i = 0; i < 16; i++)
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);

        /* Setup TLB control */
        tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
        tmp = REG_SET_FIELD(tmp,
                            MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL,
                            0);
        WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
        u32 tmp;

        tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp,
                            VM_L2_PROTECTION_FAULT_CNTL,
                            TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
                            value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
                            EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

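/* Record the MMHUB register offsets in the shared amdgpu_vmhub structure. */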
void mmhub_v1_0_init(struct amdgpu_device *adev)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

        hub->ctx0_ptb_addr_lo32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
        hub->ctx0_ptb_addr_hi32 =
                SOC15_REG_OFFSET(MMHUB, 0,
                                 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
        hub->vm_inv_eng0_req =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
        hub->vm_inv_eng0_ack =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
        hub->vm_context0_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
        hub->vm_l2_pro_fault_status =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
        hub->vm_l2_pro_fault_cntl =
                SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
}

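/* Toggle medium grain clock gating in the ATC L2 and DAGB0/DAGB1 blocks. */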
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                        bool enable)
{
        uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

        def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

        if (adev->asic_type != CHIP_RAVEN) {
                def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
                def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
        } else
                def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
                data |= ATC_L2_MISC_CG__ENABLE_MASK;

                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

                if (adev->asic_type != CHIP_RAVEN)
                        data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
        } else {
                data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

                if (adev->asic_type != CHIP_RAVEN)
                        data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
        }

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

        if (def1 != data1) {
                if (adev->asic_type != CHIP_RAVEN)
                        WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
                else
                        WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
        }

        if (adev->asic_type != CHIP_RAVEN && def2 != data2)
                WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}

static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                   bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
                data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
        else
                data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
                data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
        else
                data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}

static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                  bool enable)
{
        uint32_t def, data;

        def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
            (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
        else
                data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

        if (def != data)
                WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}

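/* Update MMHUB and ATHUB clock gating and light sleep to match @state. */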
int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
{
        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_RAVEN:
                mmhub_v1_0_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                athub_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                mmhub_v1_0_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                athub_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        default:
                break;
        }

        return 0;
}

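/* Report which MC clock gating features are currently enabled in hardware. */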
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
        int data;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        /* AMD_CG_SUPPORT_MC_MGCG */
        data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
        if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_MGCG;

        /* AMD_CG_SUPPORT_MC_LS */
        data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
        if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_LS;
}