mmhub_v1_0.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include "amdgpu.h"
  24. #include "mmhub_v1_0.h"
  25. #include "mmhub/mmhub_1_0_offset.h"
  26. #include "mmhub/mmhub_1_0_sh_mask.h"
  27. #include "mmhub/mmhub_1_0_default.h"
  28. #include "athub/athub_1_0_offset.h"
  29. #include "athub/athub_1_0_sh_mask.h"
  30. #include "vega10_enum.h"
  31. #include "soc15_common.h"
  32. #define mmDAGB0_CNTL_MISC2_RV 0x008f
  33. #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
  34. u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
  35. {
  36. u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
  37. base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
  38. base <<= 24;
  39. return base;
  40. }
  41. static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
  42. {
  43. uint64_t value;
  44. BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
  45. value = adev->gart.table_addr - adev->gmc.vram_start +
  46. adev->vm_manager.vram_base_offset;
  47. value &= 0x0000FFFFFFFFF000ULL;
  48. value |= 0x1; /* valid bit */
  49. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
  50. lower_32_bits(value));
  51. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
  52. upper_32_bits(value));
  53. }
  54. static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
  55. {
  56. mmhub_v1_0_init_gart_pt_regs(adev);
  57. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
  58. (u32)(adev->gmc.gart_start >> 12));
  59. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
  60. (u32)(adev->gmc.gart_start >> 44));
  61. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
  62. (u32)(adev->gmc.gart_end >> 12));
  63. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
  64. (u32)(adev->gmc.gart_end >> 44));
  65. }
/*
 * Program the MMHUB system aperture: disable AGP, set the VRAM window,
 * the default page used for out-of-range accesses, and the page reported
 * on protection faults.
 */
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Disable AGP. */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF);

	/* Program the system aperture low logical page number
	 * (aperture bounds are in 256K units: >> 18). */
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     adev->gmc.vram_start >> 18);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     adev->gmc.vram_end >> 18);

	/* Set default page address: the vram_scratch page, translated to an
	 * MC address via the VRAM base offset. */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault": redirect faulting translations to the
	 * dummy page. */
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	/* Allow PTE read retries during active page migration. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}
/* Configure the MMHUB L1 TLB: enable it, advanced driver model and ATC. */
static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	/* NOTE(review): SYSTEM_ACCESS_MODE = 3 — mode semantics are not
	 * visible here; confirm against the register spec. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}
  113. static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
  114. {
  115. uint32_t tmp;
  116. /* Setup L2 cache */
  117. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
  118. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
  119. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
  120. /* XXX for emulation, Refer to closed source code.*/
  121. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
  122. 0);
  123. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
  124. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
  125. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
  126. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
  127. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2);
  128. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
  129. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
  130. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
  131. if (adev->gmc.translate_further) {
  132. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
  133. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
  134. L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
  135. } else {
  136. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
  137. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
  138. L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
  139. }
  140. tmp = mmVM_L2_CNTL4_DEFAULT;
  141. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
  142. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
  143. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL4, tmp);
  144. }
  145. static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
  146. {
  147. uint32_t tmp;
  148. tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
  149. tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
  150. tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
  151. WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
  152. }
  153. static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
  154. {
  155. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
  156. 0XFFFFFFFF);
  157. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
  158. 0x0000000F);
  159. WREG32_SOC15(MMHUB, 0,
  160. mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
  161. WREG32_SOC15(MMHUB, 0,
  162. mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
  163. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
  164. 0);
  165. WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
  166. 0);
  167. }
/*
 * Program VM contexts 1..15 (per-process VMIDs): page table depth and
 * block size, all protection-fault defaults enabled, no-retry XNACK,
 * and a 0..max_pfn address range for each context.
 */
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	/* Adjust depth/block size for the translate-further mode. */
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	/* Contexts 1..15; CNTL registers are one dword apart,
	 * the START/END address registers two dwords apart. */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
  214. static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
  215. {
  216. unsigned i;
  217. for (i = 0; i < 18; ++i) {
  218. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
  219. 2 * i, 0xffffffff);
  220. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
  221. 2 * i, 0x1f);
  222. }
  223. }
/*
 * One register-engine RAM entry: @data is written at RAM slot @index
 * (see mmhub_v1_0_initialize_power_gating()).
 */
struct pctl_data {
	uint32_t index;	/* RENG_RAM_INDEX value */
	uint32_t data;	/* RENG_RAM_DATA value */
};
/*
 * PCTL0 register-engine RAM program. Each entry is loaded via the
 * PCTL0_RENG_RAM_INDEX/DATA pair in mmhub_v1_0_initialize_power_gating().
 * NOTE(review): values come from hardware init tables — do not edit.
 */
static const struct pctl_data pctl0_data[] = {
	{0x0, 0x7a640},
	{0x9, 0x2a64a},
	{0xd, 0x2a680},
	{0x11, 0x6a684},
	{0x19, 0xea68e},
	{0x29, 0xa69e},
	{0x2b, 0x0010a6c0},
	{0x3d, 0x83a707},
	{0xc2, 0x8a7a4},
	{0xcc, 0x1a7b8},
	{0xcf, 0xfa7cc},
	{0xe0, 0x17a7dd},
	{0xf9, 0xa7dc},
	{0xfb, 0x12a7f5},
	{0x10f, 0xa808},
	{0x111, 0x12a810},
	{0x125, 0x7a82c}
};
#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))

/* Last instruction index executed by the PCTL0 register engine. */
#define PCTL0_RENG_EXEC_END_PTR 0x12d
/* Register range saved/restored by PCTL0 across power gating. */
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
/*
 * PCTL1 register-engine RAM program, loaded via the
 * PCTL1_RENG_RAM_INDEX/DATA pair in mmhub_v1_0_initialize_power_gating().
 * NOTE(review): values come from hardware init tables — do not edit.
 */
static const struct pctl_data pctl1_data[] = {
	{0x0, 0x39a000},
	{0x3b, 0x44a040},
	{0x81, 0x2a08d},
	{0x85, 0x6ba094},
	{0xf2, 0x18a100},
	{0x10c, 0x4a132},
	{0x112, 0xca141},
	{0x120, 0x2fa158},
	{0x151, 0x17a1d0},
	{0x16a, 0x1a1e9},
	{0x16d, 0x13a1ec},
	{0x182, 0x7a201},
	{0x18b, 0x3a20a},
	{0x190, 0x7a580},
	{0x199, 0xa590},
	{0x19b, 0x4a594},
	{0x1a1, 0x1a59c},
	{0x1a4, 0x7a82c},
	{0x1ad, 0xfa7cc},
	{0x1be, 0x17a7dd},
	{0x1d7, 0x12a810},
	{0x1eb, 0x4000a7e1},
	{0x1ec, 0x5000a7f5},
	{0x1ed, 0x4000a7e2},
	{0x1ee, 0x5000a7dc},
	{0x1ef, 0x4000a7e3},
	{0x1f0, 0x5000a7f6},
	{0x1f1, 0x5000a7e4}
};
#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))

/* Last instruction index executed by the PCTL1 register engine. */
#define PCTL1_RENG_EXEC_END_PTR 0x1f1
/* Register ranges saved/restored by PCTL1 across power gating. */
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d
#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c
#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833
/*
 * Program the base/limit register ranges that the PCTL0/PCTL1 register
 * engines save and restore across MMHUB power gating.
 */
static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev)
{
	uint32_t tmp = 0;

	/* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */
	tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
			    STCTRL_REGISTER_SAVE_BASE,
			    PCTL0_STCTRL_REG_SAVE_RANGE0_BASE);
	tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
			    STCTRL_REGISTER_SAVE_LIMIT,
			    PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT);
	WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp);

	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
			    STCTRL_REGISTER_SAVE_BASE,
			    PCTL1_STCTRL_REG_SAVE_RANGE0_BASE);
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
			    STCTRL_REGISTER_SAVE_LIMIT,
			    PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT);
	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp);

	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
			    STCTRL_REGISTER_SAVE_BASE,
			    PCTL1_STCTRL_REG_SAVE_RANGE1_BASE);
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
			    STCTRL_REGISTER_SAVE_LIMIT,
			    PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT);
	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp);

	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
			    STCTRL_REGISTER_SAVE_BASE,
			    PCTL1_STCTRL_REG_SAVE_RANGE2_BASE);
	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
			    STCTRL_REGISTER_SAVE_LIMIT,
			    PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT);
	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp);
}
/*
 * Load the PCTL0/PCTL1 register-engine RAMs with their save/restore
 * programs, program the register save ranges, and set each engine's
 * execute end pointer. Light sleep is toggled off around the RAM loads.
 * Skipped under SRIOV.
 */
void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
{
	uint32_t pctl0_misc = 0;
	uint32_t pctl0_reng_execute = 0;
	uint32_t pctl1_misc = 0;
	uint32_t pctl1_reng_execute = 0;
	int i = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/****************** pctl0 **********************/
	pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
	pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);

	/* Light sleep must be disabled before writing to pctl0 registers */
	pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);

	/* Write data used to access ram of register engine */
	for (i = 0; i < PCTL0_DATA_LEN; i++) {
		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_INDEX,
			     pctl0_data[i].index);
		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA,
			     pctl0_data[i].data);
	}

	/* Re-enable light sleep */
	pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);

	/****************** pctl1 **********************/
	pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
	pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);

	/* Light sleep must be disabled before writing to pctl1 registers */
	pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);

	/* Write data used to access ram of register engine */
	for (i = 0; i < PCTL1_DATA_LEN; i++) {
		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX,
			     pctl1_data[i].index);
		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA,
			     pctl1_data[i].data);
	}

	/* Re-enable light sleep */
	pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);

	mmhub_v1_0_power_gating_write_save_ranges(adev);

	/* Set the reng execute end ptr for pctl0 */
	pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
					   PCTL0_RENG_EXECUTE,
					   RENG_EXECUTE_END_PTR,
					   PCTL0_RENG_EXEC_END_PTR);
	WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);

	/* Set the reng execute end ptr for pctl1 */
	pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
					   PCTL1_RENG_EXECUTE,
					   RENG_EXECUTE_END_PTR,
					   PCTL1_RENG_EXEC_END_PTR);
	WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
}
  383. void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
  384. bool enable)
  385. {
  386. uint32_t pctl0_reng_execute = 0;
  387. uint32_t pctl1_reng_execute = 0;
  388. if (amdgpu_sriov_vf(adev))
  389. return;
  390. pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
  391. pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
  392. if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
  393. pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
  394. PCTL0_RENG_EXECUTE,
  395. RENG_EXECUTE_ON_PWR_UP, 1);
  396. pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
  397. PCTL0_RENG_EXECUTE,
  398. RENG_EXECUTE_ON_REG_UPDATE, 1);
  399. WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
  400. pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
  401. PCTL1_RENG_EXECUTE,
  402. RENG_EXECUTE_ON_PWR_UP, 1);
  403. pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
  404. PCTL1_RENG_EXECUTE,
  405. RENG_EXECUTE_ON_REG_UPDATE, 1);
  406. WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
  407. if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
  408. amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
  409. } else {
  410. pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
  411. PCTL0_RENG_EXECUTE,
  412. RENG_EXECUTE_ON_PWR_UP, 0);
  413. pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
  414. PCTL0_RENG_EXECUTE,
  415. RENG_EXECUTE_ON_REG_UPDATE, 0);
  416. WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
  417. pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
  418. PCTL1_RENG_EXECUTE,
  419. RENG_EXECUTE_ON_PWR_UP, 0);
  420. pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
  421. PCTL1_RENG_EXECUTE,
  422. RENG_EXECUTE_ON_REG_UPDATE, 0);
  423. WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
  424. }
  425. }
  426. int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
  427. {
  428. if (amdgpu_sriov_vf(adev)) {
  429. /*
  430. * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
  431. * VF copy registers so vbios post doesn't program them, for
  432. * SRIOV driver need to program them
  433. */
  434. WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
  435. adev->gmc.vram_start >> 24);
  436. WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
  437. adev->gmc.vram_end >> 24);
  438. }
  439. /* GART Enable. */
  440. mmhub_v1_0_init_gart_aperture_regs(adev);
  441. mmhub_v1_0_init_system_aperture_regs(adev);
  442. mmhub_v1_0_init_tlb_regs(adev);
  443. mmhub_v1_0_init_cache_regs(adev);
  444. mmhub_v1_0_enable_system_domain(adev);
  445. mmhub_v1_0_disable_identity_aperture(adev);
  446. mmhub_v1_0_setup_vmid_config(adev);
  447. mmhub_v1_0_program_invalidation(adev);
  448. return 0;
  449. }
  450. void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
  451. {
  452. u32 tmp;
  453. u32 i;
  454. /* Disable all tables */
  455. for (i = 0; i < 16; i++)
  456. WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);
  457. /* Setup TLB control */
  458. tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
  459. tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
  460. tmp = REG_SET_FIELD(tmp,
  461. MC_VM_MX_L1_TLB_CNTL,
  462. ENABLE_ADVANCED_DRIVER_MODEL,
  463. 0);
  464. WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
  465. /* Setup L2 cache */
  466. tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
  467. tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
  468. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
  469. WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
  470. }
/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page; false makes the
 *         hardware crash the GPU on both retry and no-retry faults
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	/* Apply @value to every protection-fault default-handling field. */
	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	/* When not redirecting to the default page, make faults fatal so
	 * they are not silently dropped. */
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}

	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
  513. void mmhub_v1_0_init(struct amdgpu_device *adev)
  514. {
  515. struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
  516. hub->ctx0_ptb_addr_lo32 =
  517. SOC15_REG_OFFSET(MMHUB, 0,
  518. mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
  519. hub->ctx0_ptb_addr_hi32 =
  520. SOC15_REG_OFFSET(MMHUB, 0,
  521. mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
  522. hub->vm_inv_eng0_req =
  523. SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
  524. hub->vm_inv_eng0_ack =
  525. SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
  526. hub->vm_context0_cntl =
  527. SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
  528. hub->vm_l2_pro_fault_status =
  529. SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
  530. hub->vm_l2_pro_fault_cntl =
  531. SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
  532. }
/*
 * Toggle MC medium-grain clock gating in ATC_L2_MISC_CG and the
 * DAGB0/DAGB1 CNTL_MISC2 registers. Raven has only DAGB0, reached
 * through the mmDAGB0_CNTL_MISC2_RV offset defined at the top of this
 * file; other ASICs also program DAGB1.
 */
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);

	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
		def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2);
	} else
		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		/* Enable CG: clear all the DISABLE_*_CG bits. */
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		/* Disable CG: set all the DISABLE_*_CG bits. */
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	/* Only touch the hardware when a value actually changed. */
	if (def != data)
		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
		else
			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
}
  585. static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
  586. bool enable)
  587. {
  588. uint32_t def, data;
  589. def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
  590. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
  591. data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
  592. else
  593. data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
  594. if (def != data)
  595. WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
  596. }
  597. static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
  598. bool enable)
  599. {
  600. uint32_t def, data;
  601. def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
  602. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
  603. data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
  604. else
  605. data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
  606. if (def != data)
  607. WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
  608. }
  609. static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
  610. bool enable)
  611. {
  612. uint32_t def, data;
  613. def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
  614. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
  615. (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
  616. data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
  617. else
  618. data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
  619. if(def != data)
  620. WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
  621. }
  622. int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
  623. enum amd_clockgating_state state)
  624. {
  625. if (amdgpu_sriov_vf(adev))
  626. return 0;
  627. switch (adev->asic_type) {
  628. case CHIP_VEGA10:
  629. case CHIP_VEGA12:
  630. case CHIP_VEGA20:
  631. case CHIP_RAVEN:
  632. mmhub_v1_0_update_medium_grain_clock_gating(adev,
  633. state == AMD_CG_STATE_GATE ? true : false);
  634. athub_update_medium_grain_clock_gating(adev,
  635. state == AMD_CG_STATE_GATE ? true : false);
  636. mmhub_v1_0_update_medium_grain_light_sleep(adev,
  637. state == AMD_CG_STATE_GATE ? true : false);
  638. athub_update_medium_grain_light_sleep(adev,
  639. state == AMD_CG_STATE_GATE ? true : false);
  640. break;
  641. default:
  642. break;
  643. }
  644. return 0;
  645. }
/*
 * Report the currently active MC clock-gating features by reading the
 * hardware state and OR-ing the corresponding AMD_CG_SUPPORT_* bits
 * into @flags.
 */
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data;

	/* NOTE(review): under SRIOV *flags is cleared but the register
	 * reads below still execute and may OR bits back in — confirm
	 * whether an early return is intended here. */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}