gmc_v6_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};
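
/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: saved MC/display state
 *
 * Stops display MC access, blacks out the MC and blocks CPU access
 * to the framebuffer so the MC can be reprogrammed safely.
 */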
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
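
/**
 * gmc_v6_0_mc_resume - undo the blackout done by gmc_v6_0_mc_stop
 *
 * @adev: amdgpu_device pointer
 * @save: saved MC/display state
 *
 * Lifts the MC blackout, re-enables CPU framebuffer read/write
 * access and resumes display MC access.
 */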
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
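
/**
 * gmc_v6_0_init_microcode - fetch the MC firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Selects the firmware file for the detected ASIC (or the special
 * si58 image when MC_SEQ_MISC0 reports the 0x58 memory
 * configuration), then requests and validates it.
 * Returns 0 on success, a negative error code on failure.
 */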
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
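
/**
 * gmc_v6_0_mc_load_microcode - load MC ucode into the hardware
 *
 * @adev: amdgpu_device pointer
 *
 * If the MC sequencer is not already running, resets it, programs
 * the IO debug index/data register pairs and the ucode itself,
 * restarts the engine and waits for the D0 and D1 training-done
 * flags.
 */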
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
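
/**
 * gmc_v6_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the HDP registers, disables VGA render and memory
 * access, and programs the system aperture and framebuffer
 * location while the MC is stopped.
 */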
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v6_0_mc_stop(adev, &save);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);

	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	gmc_v6_0_mc_resume(adev, &save);
}
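
/**
 * gmc_v6_0_mc_init - query the VRAM configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Derives the VRAM bus width from the channel size and count,
 * reads the VRAM size from CONFIG_MEMSIZE, determines the GART
 * size and places VRAM and GTT in the GPU address space.
 */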
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11)) {
		chansize = 16;
	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the GART size to the
	 * larger of 1024 MB or the VRAM size.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v6_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
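
/**
 * gmc_v6_0_gart_flush_gpu_tlb - flush the VM TLB for a vmid
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM instance to flush
 *
 * Flushes the HDP read cache and requests a TLB invalidation for
 * the given VMID so updated page table entries take effect.
 */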
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
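
/**
 * gmc_v6_0_gart_set_pte_pde - write one GART page table entry
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: CPU address of the page table
 * @gpu_page_idx: entry index to update
 * @addr: destination address the entry should map to
 * @flags: access flags for the entry
 *
 * Combines the page-aligned address with the flags and writes the
 * resulting 64-bit PTE into the table.
 */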
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
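
/**
 * gmc_v6_0_set_fault_enable_default - toggle VM fault reporting
 *
 * @adev: amdgpu_device pointer
 * @value: true to enable the default protection fault handling
 *
 * Enables or disables all of the context1 "protection fault enable
 * default" bits in a single register update.
 */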
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
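
/**
 * gmc_v6_0_gart_enable - set up and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Pins the GART table in VRAM, programs the L1 TLB and L2 cache
 * control registers, points context0 at the GTT range, points
 * contexts 1-15 at a valid table for now, then flushes the TLB.
 */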
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
		<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}
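
/**
 * gmc_v6_0_gart_disable - turn the GART off
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts and the L1 TLB / L2 cache, then unpins
 * the GART table.
 */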
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	amdgpu_gart_table_vram_unpin(adev);
}

static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
{
}
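
/**
 * gmc_v6_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: faulting page address
 * @mc_client: four-character memory client ID
 *
 * Decodes the protection fault status register and logs which
 * client faulted, on which VMID, and whether it was a read or a
 * write.
 */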
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gart_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	adev->mc.mc_mask = 0xffffffffffULL;

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	return gmc_v6_0_gart_enable(adev);
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
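
/**
 * gmc_v6_0_soft_reset - soft reset the VMC/MC blocks
 *
 * @adev: amdgpu_device pointer
 *
 * If the VMC or MC report busy in SRBM_STATUS, stops the MC,
 * pulses the corresponding SRBM soft reset bits and resumes the MC.
 */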
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev, &save);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
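
/**
 * gmc_v6_0_process_interrupt - handle a VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this fault came in on
 * @entry: decoded interrupt vector entry
 *
 * Reads the fault address and status registers, acknowledges the
 * fault via VM_CONTEXT1_CNTL2, optionally disables further fault
 * reporting, and logs the decoded fault (rate limited).
 */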
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};