gmc_v8_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};
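
/*
 * Note: each golden-settings table above is a flat list of
 * { register offset, AND mask, OR value } triplets.  For each triplet,
 * amdgpu_program_register_sequence() read-modify-writes the register:
 * the masked bits are cleared and the value is ORed in (an all-ones
 * mask writes the value outright).
 */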
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_stoney_common,
						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
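
/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).  Loading is skipped when the
 * vbios reports an MC firmware version of zero (see the check below).
 * Returns 0 on success, error on failure.
 */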
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on vi */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
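
/*
 * Translate the generic AMDGPU_VM_PAGE_* mapping flags handed in by the
 * VM code into the hardware AMDGPU_PTE_* bits described in the PTE
 * format comment above.
 */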
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
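
/**
 * gmc_v8_0_gart_init - gart table setup
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the page table in
 * VRAM: one 8-byte PTE per GPU page.
 * Returns 0 for success, error for failure.
 */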
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_vm_init - vi vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vi specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - vi vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}
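
/*
 * Map the memory type encoded in the MT field of MC_SEQ_MISC0 onto the
 * driver's AMDGPU_VRAM_TYPE_* enum; anything unrecognized is reported
 * as AMDGPU_VRAM_TYPE_UNKNOWN.
 */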
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
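
/*
 * early_init wires up the gart and irq function pointers and lays out
 * the GPUVM apertures: a 4 GB shared aperture starting at
 * 0x2000000000000000 immediately followed by a 4 GB private aperture.
 */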
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}
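
/* Fiji exposes MC_SEQ_MISC0 at a different offset than the other VI
 * parts, so define it locally; only the CHIP_FIJI path below uses it.
 */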
#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * amdgpu_vm_size is in GB, so shifting by 18 converts it to
	 * 4k pages (e.g. 4GB is (1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read the MC busy bits from SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
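
/*
 * check_soft_reset records which blocks (the VMC and, on dGPUs, the MC)
 * are still busy as SRBM_SOFT_RESET bits in adev->mc.srbm_soft_reset;
 * the pre/soft/post reset callbacks below only act when that mask is
 * non-zero.
 */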
static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->mc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->mc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev, &adev->mc.save);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->mc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->mc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev, &adev->mc.save);
	return 0;
}
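
/*
 * Enable or disable the VM protection fault interrupts by toggling the
 * per-fault-type *_PROTECTION_FAULT_ENABLE_INTERRUPT bits in
 * VM_CONTEXT0_CNTL (system context) and VM_CONTEXT1_CNTL (user VMs).
 */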
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
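
/*
 * Handle a VM protection fault interrupt: read back and clear the fault
 * address/status registers, then print a decoded description of the
 * fault (rate limited).  On SR-IOV VFs the fault registers are not
 * accessible, so only the raw source data is printed.
 */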
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}
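
/*
 * The two helpers below implement MC clock gating on Fiji: medium grain
 * clock gating toggles the ENABLE bit and light sleep toggles the
 * MEM_LS_ENABLE bit in the same set of MC/ATC/VM clock gating registers.
 */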
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
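
/*
 * The three versioned IP block descriptors below share the same
 * callbacks; the ASIC setup code (vi.c) registers whichever one matches
 * the GMC hardware revision of the chip being brought up.
 */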
const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};