/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2 0x20000000
#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

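/* Stop display/CPU framebuffer access and put the memory controller into
 * blackout mode before it is reprogrammed.
 */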
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

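/* Take the memory controller out of blackout and restore CPU and display
 * framebuffer access.
 */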
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

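/* Select and request the external MC firmware image for this ASIC; boards
 * with the 0x58 memory configuration need the special si58 image.
 */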
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

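/* If the MC sequencer is not already running, upload the io-debug register
 * pairs and the MC ucode, then wait for memory training on both channels.
 */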
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

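/* Program the system aperture, FB location and AGP registers in the MC
 * while display and CPU framebuffer accesses are stopped.
 */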
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v6_0_mc_stop(adev, &save);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);

	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	gmc_v6_0_mc_resume(adev, &save);
}

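/* Determine the VRAM width from the channel configuration, read the VRAM
 * size, and place the VRAM and GTT ranges in the GPU address space.
 */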
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11)) {
		chansize = 16;
	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the GART size to the default
	 * GTT size or the VRAM size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v6_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

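/* Flush the HDP cache and request a TLB invalidate for the given VMID. */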
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

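/* Write one GART page table entry: the page-aligned address OR'ed with the
 * PTE flags.
 */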
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	BUG_ON(addr & 0xFFFFFF0000000FFFULL);
	return addr;
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

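/* Program the TLB and VM L2 cache, set up VM context 0 for the GART and
 * contexts 1-15 for per-process VMs, then flush the TLB.
 */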
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	amdgpu_gart_table_vram_unpin(adev);
}

static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

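/* Decode a VM protection fault status word and log the faulting client,
 * page and access type.
 */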
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

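/* Map the MC_SEQ_MISC0 memory type field to an AMDGPU_VRAM_TYPE_* value. */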
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gart_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

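/* Software init: detect the VRAM type, register the VM fault interrupt
 * sources, set the DMA masks, load the MC firmware and initialize the MC,
 * buffer object, GART and VM managers.
 */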
static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	adev->mc.mc_mask = 0xffffffffffULL;

	adev->mc.stolen_size = 256 * 1024;

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v6_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

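/* If SRBM reports the VMC or MC blocks busy, soft-reset them through
 * SRBM_SOFT_RESET with MC access stopped around the reset.
 */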
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev, &save);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

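/* Enable or disable the VM protection fault interrupt sources for
 * contexts 0 and 1.
 */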
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

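/* VM fault handler: read the fault address and status registers and print a
 * rate-limited decode of the fault.
 */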
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

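/* Dispatch tables: IP-block lifecycle hooks, GART callbacks and the VM
 * fault interrupt callbacks.
 */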
static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};