/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
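
/**
 * gmc_v7_0_init_golden_registers - program chip-specific golden settings
 *
 * @adev: amdgpu_device pointer
 *
 * Apply the ASIC-specific golden register sequences; currently only
 * Topaz/Iceland has any (CIK).
 */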
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}
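
/**
 * gmc_v7_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then blackout the MC and block CPU
 * access to the framebuffer so the MC can be reprogrammed safely (CIK).
 */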
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
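
/**
 * gmc_v7_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU access to the
 * framebuffer (CIK). Pairs with gmc_v7_0_mc_stop().
 */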
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
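
/**
 * gmc_v7_0_vram_gtt_location - place VRAM and GART in the address space
 *
 * @adev: amdgpu_device pointer
 * @mc: amdgpu_gmc structure to fill in
 *
 * Read the VRAM base from MC_VM_FB_LOCATION and lay out the VRAM and
 * GART apertures within the GPU's physical address space (CIK).
 */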
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on CIK */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:   /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
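
/**
 * gmc_v7_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: new page directory base address
 *
 * Update the page table base address for @vmid and request a TLB
 * flush from within a command stream rather than via MMIO (CIK).
 */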
static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}
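
/**
 * gmc_v7_0_emit_pasid_mapping - write a vmid-to-pasid mapping from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to map
 * @pasid: process address space ID
 *
 * Write @pasid into the IH VMID lookup table entry for @vmid from
 * within a command stream (CIK).
 */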
static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/**
 * gmc_v7_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
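
/**
 * gmc_v7_0_get_vm_pte_flags - convert VM mapping flags to hw PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* mapping flags
 *
 * Translate the generic VM page flags into the hardware PTE bits
 * understood by the CIK VM block.
 */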
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
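
/**
 * gmc_v7_0_get_vm_pde - fix up a page directory entry
 *
 * @adev: amdgpu_device pointer
 * @level: page table level (unused on CIK)
 * @addr: page directory entry address
 * @flags: page directory entry flags
 *
 * No PDE address translation is needed on CIK; just sanity-check that
 * the address carries no bits outside the supported range.
 */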
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
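
/**
 * gmc_v7_0_gart_init - allocate the GART table
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the page table
 * in VRAM (one 8-byte PTE per GPU page). Returns 0 for success.
 */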
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: process address space ID of the faulting process
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
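
/**
 * gmc_v7_0_convert_vram_type - map the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Convert the hardware memory type encoding into the driver's
 * AMDGPU_VRAM_TYPE_* enumeration.
 */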
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}
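
/**
 * gmc_v7_0_get_vbios_fb_size - size of the pre-OS framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * Work out how much VRAM the vBIOS scanout buffer occupies so it can
 * be reserved as stolen memory, or 0 if the pre-OS buffer uses up
 * most of VRAM.
 */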
static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;
	return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v7_0_gart_fini(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS and wait for the MC blocks to go idle */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}
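
/**
 * gmc_v7_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: the interrupt source this callback is registered for
 * @type: interrupt type (unused, there is only one VM fault source)
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Toggle the protection fault interrupt enable bits in both the
 * system context (VMID 0) and the user contexts (VMIDs 1-15).
 */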
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
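
/**
 * gmc_v7_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: the interrupt source that fired
 * @entry: decoded interrupt vector entry
 *
 * Read and clear the fault address/status registers, then print a
 * rate-limited, human readable description of the fault.
 */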
static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v7_0_set_pte_pde,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v7_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};