gmc_v6_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK  0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2  0x20000000
#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
#define MC_SEQ_MISC0__MT__HBM   0x60000000
#define MC_SEQ_MISC0__MT__DDR3  0xB0000000

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

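/*
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * Blocks CPU access to the frame buffer and puts the MC into blackout
 * mode so the VRAM configuration can be changed safely.
 */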
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

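/*
 * gmc_v6_0_mc_resume - restart the memory controller
 *
 * Takes the MC out of blackout mode and re-enables CPU read/write
 * access to the frame buffer.
 */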
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

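/*
 * gmc_v6_0_init_microcode - fetch the MC firmware
 *
 * Picks the firmware image for the detected SI ASIC (or the special
 * si58 image when MC_SEQ_MISC0 reports a 0x58 memory configuration)
 * and loads it via request_firmware().
 */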
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

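/*
 * gmc_v6_0_mc_load_microcode - upload the MC firmware to the ASIC
 *
 * If the MC sequencer is not already running: reset it, write the
 * io-debug register pairs and ucode words from the firmware image,
 * restart the engine and wait for the TRAIN_DONE_D0/D1 memory
 * training flags.
 */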
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}
	}

	return 0;
}

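/*
 * gmc_v6_0_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * Reads the VRAM base from MC_VM_FB_LOCATION, caps the reported VRAM
 * size to leave room for the GTT, and lets the core helpers pick the
 * final VRAM and GART locations.
 */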
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
}

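/*
 * gmc_v6_0_mc_program - program the VRAM apertures
 *
 * Initializes the HDP registers, locks out VGA access to memory and
 * programs the system aperture to cover the VRAM range, waiting for
 * the MC to go idle around the update.
 */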
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
}

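/*
 * gmc_v6_0_mc_init - query the VRAM configuration
 *
 * Derives the memory bus width from the channel size and channel
 * count, reads the VRAM size from CONFIG_MEMSIZE and picks the
 * VRAM/GTT layout.
 */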
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11)) {
		chansize = 16;
	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	amdgpu_gart_set_defaults(adev);
	gmc_v6_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

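/*
 * GART
 * VMID 0 covers the physical GPU addresses used by the kernel;
 * VMIDs 1-15 are used for userspace clients and are managed by the
 * amdgpu vm code. A TLB flush writes back the HDP cache and then
 * requests invalidation for the given VMID.
 */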
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

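/*
 * gmc_v6_0_gart_set_pte_pde - write one 64-bit page table entry
 *
 * Combines the 4K-aligned physical address with the access flags and
 * writes the entry into the CPU mapping of the page table.
 */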
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	BUG_ON(addr & 0xFFFFFF0000000FFFULL);
	return addr;
}

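/*
 * gmc_v6_0_set_fault_enable_default - toggle VM fault reporting
 *
 * Enables or disables the context 1 protection faults for range,
 * dummy page, PDE0, valid, read and write violations.
 */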
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

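/*
 * gmc_v6_0_gart_enable - program and enable the GART
 *
 * Pins the page table in VRAM, sets up the L1 TLB and L2 cache,
 * programs context 0 for the kernel GART range and contexts 1-15
 * for userspace VMs, then flushes the TLB.
 */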
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

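/*
 * gmc_v6_0_gart_init - allocate the GART page table
 *
 * One 8-byte entry per GPU page; the table itself lives in VRAM.
 */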
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	amdgpu_gart_table_vram_unpin(adev);
}

static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

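/*
 * gmc_v6_0_vm_decode_fault - log a decoded VM protection fault
 *
 * Pulls the VMID, protection bits, memory client id and read/write
 * direction out of VM_CONTEXT1_PROTECTION_FAULT_STATUS and prints
 * them together with the faulting page and the four-character
 * memory client tag.
 */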
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gart_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

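/*
 * gmc_v6_0_sw_init - driver software setup
 *
 * Detects the VRAM type, registers the two VM fault interrupt
 * sources (src_ids 146 and 147), sizes the VM space, sets the DMA
 * mask (40 bits when possible, falling back to 32), loads the MC
 * firmware and brings up the memory and GART managers.
 */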
static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	adev->mc.mc_mask = 0xffffffffffULL;

	adev->mc.stolen_size = 256 * 1024;

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v6_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	return gmc_v6_0_gart_enable(adev);
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

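/*
 * gmc_v6_0_soft_reset - soft reset the MC and VM blocks
 *
 * Checks SRBM_STATUS for busy MC/VM units, stops the MC, pulses the
 * corresponding SRBM_SOFT_RESET bits and resumes the MC afterwards.
 */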
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

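/*
 * gmc_v6_0_process_interrupt - VM fault interrupt handler
 *
 * Reads and clears the context 1 protection fault address/status,
 * optionally disables further faults (AMDGPU_VM_FAULT_STOP_FIRST)
 * and logs a rate-limited decoded fault message.
 */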
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};