/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"
#include "vega10/soc15ip.h"
#include "vega10/UVD/uvd_7_0_offset.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/SDMA0/sdma0_4_0_offset.h"
#include "vega10/SDMA1/sdma1_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/MP/mp_9_0_offset.h"
#include "vega10/MP/mp_9_0_sh_mask.h"
#include "vega10/SMUIO/smuio_9_0_offset.h"
#include "vega10/SMUIO/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
/* declarations for the nbio_v6_1_* and nbio_v7_0_* helpers used below */
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
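
/*
 * Data Fabric (DF) and MP0 register definitions kept local to this file
 * rather than coming from the IP headers above.
 */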
#define mmFabricConfigAccessControl 0x0410
#define mmFabricConfigAccessControl_BASE_IDX 0
#define mmFabricConfigAccessControl_DEFAULT 0x00000000

//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0

//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL

enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};

#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0

/*
 * Indirect registers accessors
 *
 * Each accessor takes the matching index spinlock, writes the register
 * offset to the block's INDEX register and then reads or writes the
 * paired DATA register.
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->flags & AMD_IS_APU)
		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
	else
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;
	struct nbio_pcie_index_data *nbio_pcie_id;

	if (adev->flags & AMD_IS_APU)
		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
	else
		nbio_pcie_id = &nbio_v6_1_pcie_index_data;

	address = nbio_pcie_id->index_offset;
	data = nbio_pcie_id->data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_get_memsize(adev);
	else
		return nbio_v6_1_get_memsize(adev);
}
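
/* Golden register settings; both tables are empty for now. */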
static const u32 vega10_golden_init[] =
{
};

static const u32 raven_golden_init[] =
{
};

static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 vega10_golden_init,
						 (const u32)ARRAY_SIZE(vega10_golden_init));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 raven_golden_init,
						 (const u32)ARRAY_SIZE(raven_golden_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA10)
		return adev->clock.spll.reference_freq / 4;
	else
		return adev->clock.spll.reference_freq;
}
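
/*
 * soc15_grbm_select() steers subsequent GRBM register accesses to the
 * given ME/pipe/queue/VMID by programming GRBM_GFX_CNTL.
 */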
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
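
/*
 * Whitelist of registers that may be read back through
 * soc15_read_register(); anything not listed here returns -EINVAL.
 */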
static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		switch (reg_offset) {
		case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
			return adev->gfx.config.gb_addr_config;
		default:
			return RREG32(reg_offset);
		}
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = (adev->flags & AMD_IS_APU) ?
			nbio_v7_0_get_memsize(adev) :
			nbio_v6_1_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
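
/*
 * Full ASIC reset via the PCI config space; the ATOM BIOS scratch
 * registers flag the engines as hung while the reset is in flight.
 */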
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	soc15_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	if (adev->flags & AMD_IS_APU) {
		nbio_v7_0_enable_doorbell_aperture(adev, enable);
	} else {
		nbio_v6_1_enable_doorbell_aperture(adev, enable);
		nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
	}
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
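
/*
 * IP blocks are added in the order they will be initialized: common and
 * GMC first, then IH, PSP/powerplay, display, GFX, SDMA and the
 * multimedia engines.
 */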
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	nbio_v6_1_detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return nbio_v7_0_get_rev_id(adev);
	else
		return nbio_v6_1_get_rev_id(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};
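
/*
 * early_init wires up the indirect register accessors, picks the NBIO
 * implementation and sets the per-ASIC clock/power gating flags.
 */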
static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	/*
	 * nbio is needed by both sdma and gfx9, but is only
	 * initialized once
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_init(adev);
		break;
	case CHIP_RAVEN:
		nbio_v7_0_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	soc15_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	if (!(adev->flags & AMD_IS_APU))
		nbio_v6_1_init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
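
/*
 * The clock gating helpers below share one read-modify-write pattern and
 * only issue the register write when the value actually changes.
 */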
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
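
/*
 * DF MGCG is programmed with the fabric in broadcast mode (instance
 * access disabled); FabricConfigAccessControl is restored to its default
 * value afterwards.
 */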
static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data;

	/* Put DF on broadcast mode */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_DISABLE;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	}

	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
	       mmFabricConfigAccessControl_DEFAULT);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		nbio_v6_1_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_df_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
		nbio_v7_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		nbio_v6_1_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	nbio_v6_1_get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	/* AMD_CG_SUPPORT_DF_MGCG */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};