/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#define mmDAGB0_CNTL_MISC2_RV		0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX	0

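/*
 * MC_VM_FB_LOCATION_BASE holds the framebuffer base in units of 16 MB,
 * so mask out the base field and shift it up by 24 bits to get a byte
 * address.
 */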
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	uint64_t addr;
	u32 i;

	/* Program MC. */
	/* Update configuration */
	DRM_INFO("%s -- in\n", __func__);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);

	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
		(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
		(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they
		 * are VF copy registers that the vbios post does not program,
		 * so under SRIOV the driver needs to program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ATC_EN, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    ENABLE_L2_FRAGMENT_PROCESSING, 0);
	/* XXX for emulation, Refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
			    IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);

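	/*
	 * Context 0 is used by the kernel for GART mappings: it covers the
	 * GTT aperture [gtt_start, gtt_end] through a flat page table
	 * (PAGE_TABLE_DEPTH = 0) located at gart.table_addr.
	 */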
	/* setup context0 */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /* valid bit */

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)((u64)adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	addr = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = RREG32(addr);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);
	tmp = RREG32(addr);

	/* Disable identity aperture. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

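	/*
	 * Program the remaining per-process VM contexts (VMIDs 1-15): each
	 * context has a single CNTL register (stride of one dword, hence + i)
	 * and LO/HI start/end address registers (stride of two dwords,
	 * hence + i * 2).
	 */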
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	return 0;
}

void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}

static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}

static int mmhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	return 0;
}

static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

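/*
 * Program the address range of each of the 18 VM invalidation engines to
 * the maximum (LO32 = 0xffffffff, HI32 = 0x1f); the range registers come
 * in LO/HI pairs, hence the stride of two dwords per engine.
 */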
static int mmhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}

static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}

static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}

static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

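/*
 * Raven's DAGB0_CNTL_MISC2 register sits at a different offset (see
 * mmDAGB0_CNTL_MISC2_RV above) and no DAGB1 register is programmed for it,
 * so the clock gating path below special-cases CHIP_RAVEN.
 */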
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							 bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
		def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));
	} else
		def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);
		else
			WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV), data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
}

static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
}

static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						  bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

static int mmhub_v1_0_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		mmhub_v1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		athub_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		mmhub_v1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		athub_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}

	return 0;
}

static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static int mmhub_v1_0_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
	.name = "mmhub_v1_0",
	.early_init = mmhub_v1_0_early_init,
	.late_init = mmhub_v1_0_late_init,
	.sw_init = mmhub_v1_0_sw_init,
	.sw_fini = mmhub_v1_0_sw_fini,
	.hw_init = mmhub_v1_0_hw_init,
	.hw_fini = mmhub_v1_0_hw_fini,
	.suspend = mmhub_v1_0_suspend,
	.resume = mmhub_v1_0_resume,
	.is_idle = mmhub_v1_0_is_idle,
	.wait_for_idle = mmhub_v1_0_wait_for_idle,
	.soft_reset = mmhub_v1_0_soft_reset,
	.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
	.set_powergating_state = mmhub_v1_0_set_powergating_state,
	.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version mmhub_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_MMHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &mmhub_v1_0_ip_funcs,
};