/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#define mmFabricConfigAccessControl			0x0410
#define mmFabricConfigAccessControl_BASE_IDX		0
#define mmFabricConfigAccessControl_DEFAULT		0x00000000

//FabricConfigAccessControl
#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT	0x0
#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT	0x1
#define FabricConfigAccessControl__CfgRegInstID__SHIFT		0x10
#define FabricConfigAccessControl__CfgRegInstAccEn_MASK	0x00000001L
#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK	0x00000002L
#define FabricConfigAccessControl__CfgRegInstID_MASK		0x00FF0000L

#define mmDF_PIE_AON0_DfGlobalClkGater		0x00fc
#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX	0

//DF_PIE_AON0_DfGlobalClkGater
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT	0x0
#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK	0x0000000FL

enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY = 1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY = 2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY = 13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY = 14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY = 15
};

#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0
/*
 * Indirect registers accessor
 */
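/*
 * A sketch of the access pattern used by the accessors below, for
 * orientation: each index/data pair is a banked window.  Writing a register
 * number to the index register selects it, and the data register then
 * aliases the selected register:
 *
 *	WREG32(address, reg);	// select the target register
 *	(void)RREG32(address);	// read back to post the index write
 *	r = RREG32(data);	// access the selected register
 *
 * The read-back of the index register keeps the data access from overtaking
 * the posted index write, and the spinlock keeps the index/data sequence
 * atomic against concurrent users of the same pair.
 */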
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio_funcs->get_pcie_index_offset(adev);
	data = adev->nbio_funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
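/*
 * soc15_grbm_select() steers subsequent GRBM register accesses at one
 * specific ME/pipe/queue/VMID.  A hedged usage sketch (the caller and the
 * registers programmed are illustrative, not taken from this file):
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	soc15_grbm_select(adev, me, pipe, queue, vmid);
 *	... program per-queue registers ...
 *	soc15_grbm_select(adev, 0, 0, 0, 0);	// back to the default selection
 *	mutex_unlock(&adev->srbm_mutex);
 */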
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
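/*
 * soc15_read_bios_from_rom() below streams the VBIOS image out through the
 * SMUIO ROM_INDEX/ROM_DATA pair: the index is written once (to 0) and
 * ROM_DATA auto-increments on every read, so a plain loop pulls the image
 * one dword at a time.  Note that length_bytes is rounded up to a multiple
 * of four, so the caller's buffer must be sized in whole dwords.
 */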
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;
};

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
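/*
 * Only registers in the table above may be read through
 * soc15_read_register() (reached, for instance, from the AMDGPU_INFO
 * ioctl's read-MMR path): the requested absolute offset is matched against
 * adev->reg_offset[hwip][inst][seg] + reg_offset for each entry, and
 * anything not listed fails with -EINVAL.
 */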
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}
}
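/*
 * A hedged usage sketch for soc15_program_register_sequence(): golden
 * register tables are arrays of struct soc15_reg_golden, typically built
 * with the SOC15_REG_GOLDEN_VALUE() helper from soc15_common.h.  The
 * register and mask values below are illustrative only:
 *
 *	static const struct soc15_reg_golden golden_settings_example[] = {
 *		// and_mask != 0xffffffff: read-modify-write with both masks
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings_example,
 *					ARRAY_SIZE(golden_settings_example));
 */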
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	psp_gpu_reset(adev);

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
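/*
 * Note on soc15_set_ip_blocks() below: the order in which IP blocks are
 * added matters, since the driver core initializes blocks in array order.
 * The common/GMC/IH blocks therefore come up first, before PSP, SMU and
 * the engine blocks that depend on them.
 */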
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
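/*
 * soc15_invalidate_hdp() picks between two paths: with no ring (or a ring
 * that cannot emit register writes) the HDP read cache is invalidated by a
 * direct MMIO write, otherwise the write is emitted into the ring so that
 * it stays ordered against the GPU commands around it.
 */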
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
};
static int soc15_common_early_init(void *handle)
{
	bool psp_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;
	adev->asic_funcs = &soc15_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
		psp_enabled = true;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	return 0;
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
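/*
 * The clockgating helpers below all follow the same read-modify-write
 * pattern: snapshot the register (def), compute the new value (data), and
 * write back only when something actually changed, avoiding needless
 * register traffic on already-configured hardware.  A minimal sketch:
 *
 *	def = data = RREG32(...);
 *	if (enable && (adev->cg_flags & FEATURE))
 *		data |= FEATURE_ENABLE_MASK;
 *	else
 *		data &= ~FEATURE_ENABLE_MASK;
 *	if (def != data)
 *		WREG32(..., data);
 */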
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data;

	/* Put DF on broadcast mode */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
	data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
		data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
		data |= DF_MGCG_DISABLE;
		WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
	}

	WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
	       mmFabricConfigAccessControl_DEFAULT);
}
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_df_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
		adev->nbio_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio_funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	/* AMD_CG_SUPPORT_DF_MGCG */
	data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
	if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};