/* mmhub_v1_0.c */
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include "amdgpu.h"
  24. #include "mmhub_v1_0.h"
  25. #include "mmhub/mmhub_1_0_offset.h"
  26. #include "mmhub/mmhub_1_0_sh_mask.h"
  27. #include "mmhub/mmhub_1_0_default.h"
  28. #include "athub/athub_1_0_offset.h"
  29. #include "athub/athub_1_0_sh_mask.h"
  30. #include "vega10_enum.h"
  31. #include "soc15_common.h"
  32. #define mmDAGB0_CNTL_MISC2_RV 0x008f
  33. #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
  34. u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
  35. {
  36. u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
  37. u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
  38. base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
  39. base <<= 24;
  40. top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
  41. top <<= 24;
  42. adev->gmc.fb_start = base;
  43. adev->gmc.fb_end = top;
  44. return base;
  45. }
  46. static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
  47. {
  48. uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
  49. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
  50. lower_32_bits(value));
  51. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
  52. upper_32_bits(value));
  53. }
  54. static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
  55. {
  56. mmhub_v1_0_init_gart_pt_regs(adev);
  57. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
  58. (u32)(adev->gmc.gart_start >> 12));
  59. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
  60. (u32)(adev->gmc.gart_start >> 44));
  61. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
  62. (u32)(adev->gmc.gart_end >> 12));
  63. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
  64. (u32)(adev->gmc.gart_end >> 44));
  65. }
  66. static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
  67. {
  68. uint64_t value;
  69. uint32_t tmp;
  70. /* Program the AGP BAR */
  71. WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
  72. WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
  73. WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
  74. /* Program the system aperture low logical page number. */
  75. WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
  76. min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
  77. if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
  78. /*
  79. * Raven2 has a HW issue that it is unable to use the vram which
  80. * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
  81. * workaround that increase system aperture high address (add 1)
  82. * to get rid of the VM fault and hardware hang.
  83. */
  84. WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  85. max((adev->gmc.vram_end >> 18) + 0x1,
  86. adev->gmc.agp_end >> 18));
  87. else
  88. WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  89. max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
  90. /* Set default page address. */
  91. value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
  92. adev->vm_manager.vram_base_offset;
  93. WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
  94. (u32)(value >> 12));
  95. WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
  96. (u32)(value >> 44));
  97. /* Program "protection fault". */
  98. WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
  99. (u32)(adev->dummy_page_addr >> 12));
  100. WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
  101. (u32)((u64)adev->dummy_page_addr >> 44));
  102. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
  103. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
  104. ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
  105. WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
  106. }
  107. static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
  108. {
  109. uint32_t tmp;
  110. /* Setup TLB control */
  111. tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
  112. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
  113. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
  114. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
  115. ENABLE_ADVANCED_DRIVER_MODEL, 1);
  116. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
  117. SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
  118. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
  119. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
  120. MTYPE, MTYPE_UC);/* XXX for emulation. */
  121. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
  122. WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
  123. }
  124. static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
  125. {
  126. uint32_t tmp;
  127. /* Setup L2 cache */
  128. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
  129. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
  130. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
  131. /* XXX for emulation, Refer to closed source code.*/
  132. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
  133. 0);
  134. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
  135. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
  136. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
  137. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
  138. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
  139. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
  140. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
  141. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
  142. if (adev->gmc.translate_further) {
  143. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
  144. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
  145. L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
  146. } else {
  147. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
  148. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
  149. L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
  150. }
  151. tmp = mmVM_L2_CNTL4_DEFAULT;
  152. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
  153. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
  154. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
  155. }
  156. static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
  157. {
  158. uint32_t tmp;
  159. tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
  160. tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
  161. tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
  162. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
  163. }
  164. static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
  165. {
  166. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
  167. 0XFFFFFFFF);
  168. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
  169. 0x0000000F);
  170. WREG32_SOC15(MMHUB, 0,
  171. mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
  172. WREG32_SOC15(MMHUB, 0,
  173. mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
  174. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
  175. 0);
  176. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
  177. 0);
  178. }
  179. static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
  180. {
  181. unsigned num_level, block_size;
  182. uint32_t tmp;
  183. int i;
  184. num_level = adev->vm_manager.num_level;
  185. block_size = adev->vm_manager.block_size;
  186. if (adev->gmc.translate_further)
  187. num_level -= 1;
  188. else
  189. block_size -= 9;
  190. for (i = 0; i <= 14; i++) {
  191. tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
  192. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
  193. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
  194. num_level);
  195. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  196. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  197. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  198. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
  199. 1);
  200. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  201. PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  202. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  203. VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  204. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  205. READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  206. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  207. WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  208. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  209. EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
  210. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  211. PAGE_TABLE_BLOCK_SIZE,
  212. block_size);
  213. /* Send no-retry XNACK on fault to suppress VM fault storm. */
  214. tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
  215. RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
  216. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
  217. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
  218. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
  219. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
  220. lower_32_bits(adev->vm_manager.max_pfn - 1));
  221. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
  222. upper_32_bits(adev->vm_manager.max_pfn - 1));
  223. }
  224. }
  225. static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
  226. {
  227. unsigned i;
  228. for (i = 0; i < 18; ++i) {
  229. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
  230. 2 * i, 0xffffffff);
  231. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
  232. 2 * i, 0x1f);
  233. }
  234. }
  235. void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
  236. bool enable)
  237. {
  238. if (amdgpu_sriov_vf(adev))
  239. return;
  240. if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
  241. if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
  242. amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
  243. }
  244. }
  245. int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
  246. {
  247. if (amdgpu_sriov_vf(adev)) {
  248. /*
  249. * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
  250. * VF copy registers so vbios post doesn't program them, for
  251. * SRIOV driver need to program them
  252. */
  253. WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
  254. adev->gmc.vram_start >> 24);
  255. WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
  256. adev->gmc.vram_end >> 24);
  257. }
  258. /* GART Enable. */
  259. mmhub_v1_0_init_gart_aperture_regs(adev);
  260. mmhub_v1_0_init_system_aperture_regs(adev);
  261. mmhub_v1_0_init_tlb_regs(adev);
  262. mmhub_v1_0_init_cache_regs(adev);
  263. mmhub_v1_0_enable_system_domain(adev);
  264. mmhub_v1_0_disable_identity_aperture(adev);
  265. mmhub_v1_0_setup_vmid_config(adev);
  266. mmhub_v1_0_program_invalidation(adev);
  267. return 0;
  268. }
  269. void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
  270. {
  271. u32 tmp;
  272. u32 i;
  273. /* Disable all tables */
  274. for (i = 0; i < 16; i++)
  275. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);
  276. /* Setup TLB control */
  277. tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
  278. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
  279. tmp = REG_SET_FIELD(tmp,
  280. MC_VM_MX_L1_TLB_CNTL,
  281. ENABLE_ADVANCED_DRIVER_MODEL,
  282. 0);
  283. WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
  284. /* Setup L2 cache */
  285. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
  286. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
  287. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
  288. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
  289. }
  290. /**
  291. * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
  292. *
  293. * @adev: amdgpu_device pointer
  294. * @value: true redirects VM faults to the default page
  295. */
  296. void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
  297. {
  298. u32 tmp;
  299. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
  300. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  301. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  302. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  303. PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  304. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  305. PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  306. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  307. PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  308. tmp = REG_SET_FIELD(tmp,
  309. VM_L2_PROTECTION_FAULT_CNTL,
  310. TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
  311. value);
  312. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  313. NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  314. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  315. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  316. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  317. VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  318. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  319. READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  320. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  321. WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  322. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  323. EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
  324. if (!value) {
  325. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  326. CRASH_ON_NO_RETRY_FAULT, 1);
  327. tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
  328. CRASH_ON_RETRY_FAULT, 1);
  329. }
  330. WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
  331. }
  332. void mmhub_v1_0_init(struct amdgpu_device *adev)
  333. {
  334. struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
  335. hub->ctx0_ptb_addr_lo32 =
  336. SOC15_REG_OFFSET(MMHUB, 0,
  337. mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
  338. hub->ctx0_ptb_addr_hi32 =
  339. SOC15_REG_OFFSET(MMHUB, 0,
  340. mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
  341. hub->vm_inv_eng0_req =
  342. SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
  343. hub->vm_inv_eng0_ack =
  344. SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
  345. hub->vm_context0_cntl =
  346. SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
  347. hub->vm_l2_pro_fault_status =
  348. SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
  349. hub->vm_l2_pro_fault_cntl =
  350. SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
  351. }
  352. static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
  353. bool enable)
  354. {
  355. uint32_t def, data, def1, data1, def2 = 0, data2 = 0;
  356. def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
  357. if (adev->asic_type != CHIP_RAVEN) {
  358. def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
  359. def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
  360. } else
  361. def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);
  362. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
  363. data |= ATC_L2_MISC_CG__ENABLE_MASK;
  364. data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
  365. DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
  366. DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
  367. DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
  368. DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
  369. DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
  370. if (adev->asic_type != CHIP_RAVEN)
  371. data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
  372. DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
  373. DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
  374. DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
  375. DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
  376. DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
  377. } else {
  378. data &= ~ATC_L2_MISC_CG__ENABLE_MASK;
  379. data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
  380. DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
  381. DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
  382. DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
  383. DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
  384. DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
  385. if (adev->asic_type != CHIP_RAVEN)
  386. data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
  387. DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
  388. DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
  389. DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
  390. DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
  391. DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
  392. }
  393. if (def != data)
  394. WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
  395. if (def1 != data1) {
  396. if (adev->asic_type != CHIP_RAVEN)
  397. WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
  398. else
  399. WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
  400. }
  401. if (adev->asic_type != CHIP_RAVEN && def2 != data2)
  402. WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
  403. }
  404. static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
  405. bool enable)
  406. {
  407. uint32_t def, data;
  408. def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
  409. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
  410. data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
  411. else
  412. data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
  413. if (def != data)
  414. WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
  415. }
  416. static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
  417. bool enable)
  418. {
  419. uint32_t def, data;
  420. def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
  421. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
  422. data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
  423. else
  424. data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
  425. if (def != data)
  426. WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
  427. }
  428. static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
  429. bool enable)
  430. {
  431. uint32_t def, data;
  432. def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
  433. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
  434. (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
  435. data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
  436. else
  437. data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
  438. if(def != data)
  439. WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
  440. }
  441. int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
  442. enum amd_clockgating_state state)
  443. {
  444. if (amdgpu_sriov_vf(adev))
  445. return 0;
  446. switch (adev->asic_type) {
  447. case CHIP_VEGA10:
  448. case CHIP_VEGA12:
  449. case CHIP_VEGA20:
  450. case CHIP_RAVEN:
  451. mmhub_v1_0_update_medium_grain_clock_gating(adev,
  452. state == AMD_CG_STATE_GATE ? true : false);
  453. athub_update_medium_grain_clock_gating(adev,
  454. state == AMD_CG_STATE_GATE ? true : false);
  455. mmhub_v1_0_update_medium_grain_light_sleep(adev,
  456. state == AMD_CG_STATE_GATE ? true : false);
  457. athub_update_medium_grain_light_sleep(adev,
  458. state == AMD_CG_STATE_GATE ? true : false);
  459. break;
  460. default:
  461. break;
  462. }
  463. return 0;
  464. }
  465. void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
  466. {
  467. int data;
  468. if (amdgpu_sriov_vf(adev))
  469. *flags = 0;
  470. /* AMD_CG_SUPPORT_MC_MGCG */
  471. data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
  472. if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
  473. *flags |= AMD_CG_SUPPORT_MC_MGCG;
  474. /* AMD_CG_SUPPORT_MC_LS */
  475. data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
  476. if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
  477. *flags |= AMD_CG_SUPPORT_MC_LS;
  478. }