soc15.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/firmware.h>
  24. #include <linux/slab.h>
  25. #include <linux/module.h>
  26. #include "drmP.h"
  27. #include "amdgpu.h"
  28. #include "amdgpu_atombios.h"
  29. #include "amdgpu_ih.h"
  30. #include "amdgpu_uvd.h"
  31. #include "amdgpu_vce.h"
  32. #include "amdgpu_ucode.h"
  33. #include "amdgpu_psp.h"
  34. #include "atom.h"
  35. #include "amd_pcie.h"
  36. #include "vega10/soc15ip.h"
  37. #include "vega10/UVD/uvd_7_0_offset.h"
  38. #include "vega10/GC/gc_9_0_offset.h"
  39. #include "vega10/GC/gc_9_0_sh_mask.h"
  40. #include "vega10/SDMA0/sdma0_4_0_offset.h"
  41. #include "vega10/SDMA1/sdma1_4_0_offset.h"
  42. #include "vega10/HDP/hdp_4_0_offset.h"
  43. #include "vega10/HDP/hdp_4_0_sh_mask.h"
  44. #include "vega10/MP/mp_9_0_offset.h"
  45. #include "vega10/MP/mp_9_0_sh_mask.h"
  46. #include "vega10/SMUIO/smuio_9_0_offset.h"
  47. #include "vega10/SMUIO/smuio_9_0_sh_mask.h"
  48. #include "soc15.h"
  49. #include "soc15_common.h"
  50. #include "gfx_v9_0.h"
  51. #include "gmc_v9_0.h"
  52. #include "gfxhub_v1_0.h"
  53. #include "mmhub_v1_0.h"
  54. #include "vega10_ih.h"
  55. #include "sdma_v4_0.h"
  56. #include "uvd_v7_0.h"
  57. #include "vce_v4_0.h"
  58. #include "amdgpu_powerplay.h"
  59. #include "mxgpu_ai.h"
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");

/*
 * DF (data fabric) registers are not in the generated vega10 headers,
 * so the offsets and field layouts used below are defined locally.
 */
#define mmFabricConfigAccessControl 0x0410
#define mmFabricConfigAccessControl_BASE_IDX 0
#define mmFabricConfigAccessControl_DEFAULT 0x00000000
//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
/*
 * Values for DF_PIE_AON0_DfGlobalClkGater.MGCGMode: the cycle count in
 * the name is the gating delay selected by that mode (0 disables MGCG).
 * Consumed by soc15_update_df_medium_grain_clock_gating().
 */
enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};
/* MP0 clock-gating / light-sleep control registers (not in the headers yet);
 * used by the DRM clock-gating and light-sleep helpers below. */
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
  88. /*
  89. * Indirect registers accessor
  90. */
  91. static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  92. {
  93. unsigned long flags, address, data;
  94. u32 r;
  95. struct nbio_pcie_index_data *nbio_pcie_id;
  96. if (adev->asic_type == CHIP_VEGA10)
  97. nbio_pcie_id = &nbio_v6_1_pcie_index_data;
  98. address = nbio_pcie_id->index_offset;
  99. data = nbio_pcie_id->data_offset;
  100. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  101. WREG32(address, reg);
  102. (void)RREG32(address);
  103. r = RREG32(data);
  104. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  105. return r;
  106. }
  107. static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  108. {
  109. unsigned long flags, address, data;
  110. struct nbio_pcie_index_data *nbio_pcie_id;
  111. if (adev->asic_type == CHIP_VEGA10)
  112. nbio_pcie_id = &nbio_v6_1_pcie_index_data;
  113. address = nbio_pcie_id->index_offset;
  114. data = nbio_pcie_id->data_offset;
  115. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  116. WREG32(address, reg);
  117. (void)RREG32(address);
  118. WREG32(data, v);
  119. (void)RREG32(data);
  120. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  121. }
  122. static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
  123. {
  124. unsigned long flags, address, data;
  125. u32 r;
  126. address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
  127. data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
  128. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  129. WREG32(address, ((reg) & 0x1ff));
  130. r = RREG32(data);
  131. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  132. return r;
  133. }
  134. static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  135. {
  136. unsigned long flags, address, data;
  137. address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
  138. data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
  139. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  140. WREG32(address, ((reg) & 0x1ff));
  141. WREG32(data, (v));
  142. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  143. }
  144. static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
  145. {
  146. unsigned long flags, address, data;
  147. u32 r;
  148. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  149. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  150. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  151. WREG32(address, (reg));
  152. r = RREG32(data);
  153. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  154. return r;
  155. }
  156. static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  157. {
  158. unsigned long flags, address, data;
  159. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  160. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  161. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  162. WREG32(address, (reg));
  163. WREG32(data, (v));
  164. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  165. }
/* soc15_get_config_memsize - report VRAM size via the NBIO MEMSIZE register */
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return nbio_v6_1_get_memsize(adev);
}
/* No golden register overrides for Vega10 yet; the empty table keeps the
 * programming path in soc15_init_golden_registers() in place. */
static const u32 vega10_golden_init[] =
{
};
  173. static void soc15_init_golden_registers(struct amdgpu_device *adev)
  174. {
  175. /* Some of the registers might be dependent on GRBM_GFX_INDEX */
  176. mutex_lock(&adev->grbm_idx_mutex);
  177. switch (adev->asic_type) {
  178. case CHIP_VEGA10:
  179. amdgpu_program_register_sequence(adev,
  180. vega10_golden_init,
  181. (const u32)ARRAY_SIZE(vega10_golden_init));
  182. break;
  183. default:
  184. break;
  185. }
  186. mutex_unlock(&adev->grbm_idx_mutex);
  187. }
  188. static u32 soc15_get_xclk(struct amdgpu_device *adev)
  189. {
  190. if (adev->asic_type == CHIP_VEGA10)
  191. return adev->clock.spll.reference_freq/4;
  192. else
  193. return adev->clock.spll.reference_freq;
  194. }
  195. void soc15_grbm_select(struct amdgpu_device *adev,
  196. u32 me, u32 pipe, u32 queue, u32 vmid)
  197. {
  198. u32 grbm_gfx_cntl = 0;
  199. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
  200. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
  201. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
  202. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
  203. WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
  204. }
/* soc15_vga_set_state - enable/disable VGA access (not implemented yet) */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
/* soc15_read_disabled_bios - fetch the vbios while the device is disabled.
 * Not implemented yet; always reports failure. */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
  214. static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
  215. u8 *bios, u32 length_bytes)
  216. {
  217. u32 *dw_ptr;
  218. u32 i, length_dw;
  219. if (bios == NULL)
  220. return false;
  221. if (length_bytes == 0)
  222. return false;
  223. /* APU vbios image is part of sbios image */
  224. if (adev->flags & AMD_IS_APU)
  225. return false;
  226. dw_ptr = (u32 *)bios;
  227. length_dw = ALIGN(length_bytes, 4) / 4;
  228. /* set rom index to 0 */
  229. WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
  230. /* read out the rom data */
  231. for (i = 0; i < length_dw; i++)
  232. dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
  233. return true;
  234. }
/* Vega10-specific additions to the register read allow-list consumed by
 * soc15_read_register(); none defined yet. */
static struct amdgpu_allowed_register_entry vega10_allowed_read_registers[] = {
	/* todo */
};
  238. static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
  239. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS), false},
  240. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2), false},
  241. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0), false},
  242. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1), false},
  243. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2), false},
  244. { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3), false},
  245. { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG), false},
  246. { SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG), false},
  247. { SOC15_REG_OFFSET(GC, 0, mmCP_STAT), false},
  248. { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1), false},
  249. { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2), false},
  250. { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3), false},
  251. { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT), false},
  252. { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1), false},
  253. { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS), false},
  254. { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT), false},
  255. { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1), false},
  256. { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS), false},
  257. { SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), false},
  258. { SOC15_REG_OFFSET(GC, 0, mmCC_RB_BACKEND_DISABLE), false, true},
  259. { SOC15_REG_OFFSET(GC, 0, mmGC_USER_RB_BACKEND_DISABLE), false, true},
  260. { SOC15_REG_OFFSET(GC, 0, mmGB_BACKEND_MAP), false, false},
  261. };
  262. static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
  263. u32 sh_num, u32 reg_offset)
  264. {
  265. uint32_t val;
  266. mutex_lock(&adev->grbm_idx_mutex);
  267. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  268. amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
  269. val = RREG32(reg_offset);
  270. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  271. amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  272. mutex_unlock(&adev->grbm_idx_mutex);
  273. return val;
  274. }
/*
 * soc15_read_register - allow-listed register read
 *
 * Checks the per-ASIC table first, then the common SOC15 table.  On a
 * match, "untouched" entries leave *value at 0 without touching hardware;
 * "grbm_indexed" entries are read through GRBM SE/SH banking.  Note that
 * a match returns 0 immediately — the common table is only consulted when
 * the per-ASIC table has no entry for the offset.
 *
 * Returns 0 on success (result in *value), -EINVAL for an unknown ASIC
 * or a register that is not on either allow-list.
 */
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		asic_register_table = vega10_allowed_read_registers;
		size = ARRAY_SIZE(vega10_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					soc15_read_indexed_register(adev, se_num,
								    sh_num, reg_offset) :
					RREG32(reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
			continue;
		if (!soc15_allowed_read_registers[i].untouched)
			*value = soc15_allowed_read_registers[i].grbm_indexed ?
				soc15_read_indexed_register(adev, se_num,
							    sh_num, reg_offset) :
				RREG32(reg_offset);
		return 0;
	}
	return -EINVAL;
}
  315. static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
  316. {
  317. u32 i;
  318. dev_info(adev->dev, "GPU pci config reset\n");
  319. /* disable BM */
  320. pci_clear_master(adev->pdev);
  321. /* reset */
  322. amdgpu_pci_config_reset(adev);
  323. udelay(100);
  324. /* wait for asic to come out of reset */
  325. for (i = 0; i < adev->usec_timeout; i++) {
  326. if (nbio_v6_1_get_memsize(adev) != 0xffffffff)
  327. break;
  328. udelay(1);
  329. }
  330. }
  331. static int soc15_asic_reset(struct amdgpu_device *adev)
  332. {
  333. amdgpu_atombios_scratch_regs_engine_hung(adev, true);
  334. soc15_gpu_pci_config_reset(adev);
  335. amdgpu_atombios_scratch_regs_engine_hung(adev, false);
  336. return 0;
  337. }
  338. /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
  339. u32 cntl_reg, u32 status_reg)
  340. {
  341. return 0;
  342. }*/
  343. static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
  344. {
  345. /*int r;
  346. r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
  347. if (r)
  348. return r;
  349. r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
  350. */
  351. return 0;
  352. }
/* soc15_set_vce_clocks - program VCE evclk/ecclk (not implemented yet). */
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
  358. static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
  359. {
  360. if (pci_is_root_bus(adev->pdev->bus))
  361. return;
  362. if (amdgpu_pcie_gen2 == 0)
  363. return;
  364. if (adev->flags & AMD_IS_APU)
  365. return;
  366. if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
  367. CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
  368. return;
  369. /* todo */
  370. }
  371. static void soc15_program_aspm(struct amdgpu_device *adev)
  372. {
  373. if (amdgpu_aspm == 0)
  374. return;
  375. /* todo */
  376. }
/* soc15_enable_doorbell_aperture - toggle both the main and the
 * self-ring NBIO doorbell apertures together. */
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	nbio_v6_1_enable_doorbell_aperture(adev, enable);
	nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
}
/* SOC15 "common" IP block descriptor (v2.0.0), registered first in
 * soc15_set_ip_blocks(). */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
/*
 * soc15_set_ip_blocks - register the IP blocks for a SOC15 ASIC
 *
 * Block order matters: common, gfxhub/mmhub, GMC and IH are added before
 * PSP and powerplay, which in turn precede GFX, SDMA, UVD and VCE.
 * PSP is skipped under SR-IOV.  Returns -EINVAL for an unknown ASIC.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	nbio_v6_1_detect_hw_virt(adev);

	/* under SR-IOV, route virtualization ops through the AI VF table */
	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gfxhub_v1_0_ip_block);
		amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* soc15_get_rev_id - read the silicon revision id via NBIO */
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return nbio_v6_1_get_rev_id(adev);
}
/* gmc_v9_0_mc_wait_for_idle - wait for the memory controller to idle.
 * Placeholder: to be implemented in the MC IP block; always succeeds. */
int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	/* to be implemented in MC IP*/
	return 0;
}
/* ASIC-level callback table installed on adev in soc15_common_early_init(). */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};
/*
 * soc15_common_early_init - early init for the SOC15 common IP block
 *
 * Installs the indirect register accessors and the ASIC callback table,
 * initializes NBIO, reads the revision id, and sets the per-ASIC
 * clock/power-gating support flags and external revision id.
 * Returns -EINVAL for an unsupported ASIC.
 */
static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* no direct SMC register access on SOC15 */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	/* NOTE(review): psp_enabled is computed here but not used anywhere
	 * in this function — confirm whether it was meant to gate firmware
	 * loading */
	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	/*
	 * nbio need be used for both sdma and gfx9, but only
	 * initializes once
	 */
	switch(adev->asic_type) {
	case CHIP_VEGA10:
		nbio_v6_1_init(adev);
		break;
	default:
		return -EINVAL;
	}

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* clock-gating features supported on Vega10 */
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		/* no power-gating features yet */
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}
/* soc15_common_sw_init - no software state to set up for the common block */
static int soc15_common_sw_init(void *handle)
{
	return 0;
}
/* soc15_common_sw_fini - no software state to tear down for the common block */
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
  506. static int soc15_common_hw_init(void *handle)
  507. {
  508. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  509. /* move the golden regs per IP block */
  510. soc15_init_golden_registers(adev);
  511. /* enable pcie gen2/3 link */
  512. soc15_pcie_gen3_enable(adev);
  513. /* enable aspm */
  514. soc15_program_aspm(adev);
  515. /* enable the doorbell aperture */
  516. soc15_enable_doorbell_aperture(adev, true);
  517. return 0;
  518. }
  519. static int soc15_common_hw_fini(void *handle)
  520. {
  521. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  522. /* disable the doorbell aperture */
  523. soc15_enable_doorbell_aperture(adev, false);
  524. return 0;
  525. }
  526. static int soc15_common_suspend(void *handle)
  527. {
  528. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  529. return soc15_common_hw_fini(adev);
  530. }
  531. static int soc15_common_resume(void *handle)
  532. {
  533. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  534. return soc15_common_hw_init(adev);
  535. }
/* soc15_common_is_idle - the common block has no busy state; always idle */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
/* soc15_common_wait_for_idle - nothing to wait on; always succeeds */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
/* soc15_common_soft_reset - no soft reset for the common block */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
  548. static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
  549. {
  550. uint32_t def, data;
  551. def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
  552. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
  553. data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
  554. else
  555. data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
  556. if (def != data)
  557. WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
  558. }
  559. static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
  560. {
  561. uint32_t def, data;
  562. def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
  563. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
  564. data &= ~(0x01000000 |
  565. 0x02000000 |
  566. 0x04000000 |
  567. 0x08000000 |
  568. 0x10000000 |
  569. 0x20000000 |
  570. 0x40000000 |
  571. 0x80000000);
  572. else
  573. data |= (0x01000000 |
  574. 0x02000000 |
  575. 0x04000000 |
  576. 0x08000000 |
  577. 0x10000000 |
  578. 0x20000000 |
  579. 0x40000000 |
  580. 0x80000000);
  581. if (def != data)
  582. WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
  583. }
  584. static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
  585. {
  586. uint32_t def, data;
  587. def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
  588. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
  589. data |= 1;
  590. else
  591. data &= ~1;
  592. if (def != data)
  593. WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
  594. }
  595. static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
  596. bool enable)
  597. {
  598. uint32_t def, data;
  599. def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
  600. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
  601. data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
  602. CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
  603. else
  604. data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
  605. CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
  606. if (def != data)
  607. WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
  608. }
  609. static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
  610. bool enable)
  611. {
  612. uint32_t data;
  613. /* Put DF on broadcast mode */
  614. data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
  615. data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
  616. WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);
  617. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
  618. data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
  619. data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
  620. data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
  621. WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
  622. } else {
  623. data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
  624. data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
  625. data |= DF_MGCG_DISABLE;
  626. WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
  627. }
  628. WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
  629. mmFabricConfigAccessControl_DEFAULT);
  630. }
  631. static int soc15_common_set_clockgating_state(void *handle,
  632. enum amd_clockgating_state state)
  633. {
  634. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  635. switch (adev->asic_type) {
  636. case CHIP_VEGA10:
  637. nbio_v6_1_update_medium_grain_clock_gating(adev,
  638. state == AMD_CG_STATE_GATE ? true : false);
  639. nbio_v6_1_update_medium_grain_light_sleep(adev,
  640. state == AMD_CG_STATE_GATE ? true : false);
  641. soc15_update_hdp_light_sleep(adev,
  642. state == AMD_CG_STATE_GATE ? true : false);
  643. soc15_update_drm_clock_gating(adev,
  644. state == AMD_CG_STATE_GATE ? true : false);
  645. soc15_update_drm_light_sleep(adev,
  646. state == AMD_CG_STATE_GATE ? true : false);
  647. soc15_update_rom_medium_grain_clock_gating(adev,
  648. state == AMD_CG_STATE_GATE ? true : false);
  649. soc15_update_df_medium_grain_clock_gating(adev,
  650. state == AMD_CG_STATE_GATE ? true : false);
  651. break;
  652. default:
  653. break;
  654. }
  655. return 0;
  656. }
/* soc15_common_set_powergating_state - power gating for the common block
 * (not implemented yet; always succeeds). */
static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
/* IP-block callback table for the SOC15 common block, referenced by
 * vega10_common_ip_block above. */
const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = NULL,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
};