/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"

#define mmFabricConfigAccessControl			0x0410
#define mmFabricConfigAccessControl_BASE_IDX		0
#define mmFabricConfigAccessControl_DEFAULT		0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT	0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT	0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT		0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK		0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK	0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK		0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater		0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX	0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT	0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK	0x0000000FL

enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};

#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

/*
 * Indirect register accessors: each helper below tunnels through an
 * index/data register pair, holding the matching spinlock so the
 * index write and the data access stay atomic.
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_get_memsize(adev);
	else
		return nbio_v6_1_get_memsize(adev);
}

static const u32 vega10_golden_init[] =
{
};

static const u32 raven_golden_init[] =
{
};

static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 vega10_golden_init,
						 ARRAY_SIZE(vega10_golden_init));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 raven_golden_init,
						 ARRAY_SIZE(raven_golden_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
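
/*
 * Select which ME/pipe/queue and VMID subsequent GRBM-indexed GFX
 * register accesses target; the selection is programmed into
 * GRBM_GFX_CNTL.
 */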
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
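
/*
 * Read the VBIOS image out of the ROM through the SMUIO
 * ROM_INDEX/ROM_DATA pair; the index is written once and ROM_DATA
 * then yields consecutive dwords on each read. Not used on APUs,
 * whose VBIOS is embedded in the system BIOS image.
 */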
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;
};
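
/* Registers that userspace may read back through the read_register ASIC callback. */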
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}
}
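
/*
 * Full ASIC reset: disable bus mastering, have the PSP IP block carry
 * out the reset via its soft_reset hook, then poll the memory size
 * register until it reads back something other than 0xffffffff, which
 * indicates the ASIC has come back out of reset.
 */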
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
			break;
		}
	}

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = (adev->flags & AMD_IS_APU) ?
			nbio_v7_0_get_memsize(adev) :
			nbio_v6_1_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			       u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	if (adev->flags & AMD_IS_APU) {
		nbio_v7_0_enable_doorbell_aperture(adev, enable);
	} else {
		nbio_v6_1_enable_doorbell_aperture(adev, enable);
		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
	}
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
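
/*
 * Register the per-ASIC IP blocks in initialization order. The IP
 * register bases must be set up first, since everything after that
 * point touches hardware registers.
 */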
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	nbio_v6_1_detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_get_rev_id(adev);
	else
		return nbio_v6_1_get_rev_id(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};

static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	soc15_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	if (!(adev->flags & AMD_IS_APU))
		nbio_v6_1_init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
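
/*
 * The clockgating helpers below share a read-modify-write pattern:
 * read the current value, adjust only the relevant bits based on the
 * requested state and the ASIC's cg_flags, and skip the register
 * write entirely when nothing changed.
 */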
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data;

	/* Put DF on broadcast mode */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_DISABLE;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	}

	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
	       mmFabricConfigAccessControl_DEFAULT);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		nbio_v6_1_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_df_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
		nbio_v7_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		nbio_v6_1_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	nbio_v6_1_get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	/* AMD_CG_SUPPORT_DF_MGCG */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};