soc15.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074
  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/firmware.h>
  24. #include <linux/slab.h>
  25. #include <linux/module.h>
  26. #include <drm/drmP.h>
  27. #include "amdgpu.h"
  28. #include "amdgpu_atombios.h"
  29. #include "amdgpu_ih.h"
  30. #include "amdgpu_uvd.h"
  31. #include "amdgpu_vce.h"
  32. #include "amdgpu_ucode.h"
  33. #include "amdgpu_psp.h"
  34. #include "atom.h"
  35. #include "amd_pcie.h"
  36. #include "uvd/uvd_7_0_offset.h"
  37. #include "gc/gc_9_0_offset.h"
  38. #include "gc/gc_9_0_sh_mask.h"
  39. #include "sdma0/sdma0_4_0_offset.h"
  40. #include "sdma1/sdma1_4_0_offset.h"
  41. #include "hdp/hdp_4_0_offset.h"
  42. #include "hdp/hdp_4_0_sh_mask.h"
  43. #include "smuio/smuio_9_0_offset.h"
  44. #include "smuio/smuio_9_0_sh_mask.h"
  45. #include "soc15.h"
  46. #include "soc15_common.h"
  47. #include "gfx_v9_0.h"
  48. #include "gmc_v9_0.h"
  49. #include "gfxhub_v1_0.h"
  50. #include "mmhub_v1_0.h"
  51. #include "df_v1_7.h"
  52. #include "df_v3_6.h"
  53. #include "vega10_ih.h"
  54. #include "sdma_v4_0.h"
  55. #include "uvd_v7_0.h"
  56. #include "vce_v4_0.h"
  57. #include "vcn_v1_0.h"
  58. #include "dce_virtual.h"
  59. #include "mxgpu_ai.h"
  60. #define mmMP0_MISC_CGTT_CTRL0 0x01b9
  61. #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
  62. #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
  63. #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
  64. /* for Vega20 register name change */
  65. #define mmHDP_MEM_POWER_CTRL 0x00d4
  66. #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
  67. #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
  68. #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
  69. #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
  70. #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
  71. /*
  72. * Indirect registers accessor
  73. */
  74. static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  75. {
  76. unsigned long flags, address, data;
  77. u32 r;
  78. address = adev->nbio_funcs->get_pcie_index_offset(adev);
  79. data = adev->nbio_funcs->get_pcie_data_offset(adev);
  80. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  81. WREG32(address, reg);
  82. (void)RREG32(address);
  83. r = RREG32(data);
  84. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  85. return r;
  86. }
  87. static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  88. {
  89. unsigned long flags, address, data;
  90. address = adev->nbio_funcs->get_pcie_index_offset(adev);
  91. data = adev->nbio_funcs->get_pcie_data_offset(adev);
  92. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  93. WREG32(address, reg);
  94. (void)RREG32(address);
  95. WREG32(data, v);
  96. (void)RREG32(data);
  97. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  98. }
  99. static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
  100. {
  101. unsigned long flags, address, data;
  102. u32 r;
  103. address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
  104. data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
  105. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  106. WREG32(address, ((reg) & 0x1ff));
  107. r = RREG32(data);
  108. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  109. return r;
  110. }
  111. static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  112. {
  113. unsigned long flags, address, data;
  114. address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
  115. data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
  116. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  117. WREG32(address, ((reg) & 0x1ff));
  118. WREG32(data, (v));
  119. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  120. }
  121. static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
  122. {
  123. unsigned long flags, address, data;
  124. u32 r;
  125. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  126. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  127. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  128. WREG32(address, (reg));
  129. r = RREG32(data);
  130. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  131. return r;
  132. }
  133. static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  134. {
  135. unsigned long flags, address, data;
  136. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  137. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  138. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  139. WREG32(address, (reg));
  140. WREG32(data, (v));
  141. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  142. }
  143. static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
  144. {
  145. unsigned long flags;
  146. u32 r;
  147. spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
  148. WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
  149. r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
  150. spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
  151. return r;
  152. }
  153. static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  154. {
  155. unsigned long flags;
  156. spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
  157. WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
  158. WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
  159. spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
  160. }
  161. static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
  162. {
  163. unsigned long flags;
  164. u32 r;
  165. spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
  166. WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
  167. r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
  168. spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
  169. return r;
  170. }
  171. static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  172. {
  173. unsigned long flags;
  174. spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
  175. WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
  176. WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
  177. spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
  178. }
  179. static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
  180. {
  181. return adev->nbio_funcs->get_memsize(adev);
  182. }
/* Return the reference (xclk) frequency: the SPLL reference clock
 * cached in the clock info parsed from the VBIOS. */
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
  187. void soc15_grbm_select(struct amdgpu_device *adev,
  188. u32 me, u32 pipe, u32 queue, u32 vmid)
  189. {
  190. u32 grbm_gfx_cntl = 0;
  191. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
  192. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
  193. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
  194. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
  195. WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
  196. }
/* Enable/disable VGA access routing — not implemented on SOC15 yet. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
/* Read the VBIOS with the bus disabled — not implemented on SOC15;
 * always reports failure so callers fall back to other methods. */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
/*
 * soc15_read_bios_from_rom - read the VBIOS image from the ROM
 *
 * Streams the VBIOS out of the ROM through the SMUIO ROM_INDEX/ROM_DATA
 * auto-incrementing pair into @bios.  Returns true on success, false
 * for invalid arguments or on APUs (their VBIOS is part of the system
 * BIOS image, not a discrete ROM).
 *
 * NOTE(review): the loop stores ALIGN(length_bytes, 4)/4 full dwords,
 * so if @length_bytes is not a multiple of 4 up to 3 bytes past the
 * end of @bios are written — assumes callers size the buffer to a
 * dword multiple; TODO confirm.
 */
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
/* Describes one register that userspace is allowed to read through
 * soc15_read_register(): the hw IP block, instance and segment it
 * lives in, plus its offset within that segment. */
struct soc15_allowed_register_entry {
	uint32_t hwip;
	uint32_t inst;
	uint32_t seg;
	uint32_t reg_offset;
	bool grbm_indexed;	/* read per-SE/SH through the GRBM index */
};
/* Whitelist of registers userspace may read via soc15_read_register();
 * mostly GRBM/CP/SDMA status registers useful for hang diagnostics. */
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
  255. static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
  256. u32 sh_num, u32 reg_offset)
  257. {
  258. uint32_t val;
  259. mutex_lock(&adev->grbm_idx_mutex);
  260. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  261. amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
  262. val = RREG32(reg_offset);
  263. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  264. amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  265. mutex_unlock(&adev->grbm_idx_mutex);
  266. return val;
  267. }
  268. static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
  269. bool indexed, u32 se_num,
  270. u32 sh_num, u32 reg_offset)
  271. {
  272. if (indexed) {
  273. return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
  274. } else {
  275. if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
  276. return adev->gfx.config.gb_addr_config;
  277. else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
  278. return adev->gfx.config.db_debug2;
  279. return RREG32(reg_offset);
  280. }
  281. }
  282. static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
  283. u32 sh_num, u32 reg_offset, u32 *value)
  284. {
  285. uint32_t i;
  286. struct soc15_allowed_register_entry *en;
  287. *value = 0;
  288. for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
  289. en = &soc15_allowed_read_registers[i];
  290. if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
  291. + en->reg_offset))
  292. continue;
  293. *value = soc15_get_register_value(adev,
  294. soc15_allowed_read_registers[i].grbm_indexed,
  295. se_num, sh_num, reg_offset);
  296. return 0;
  297. }
  298. return -EINVAL;
  299. }
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		/* full-dword mask: skip the read-modify-write cycle */
		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}
}
/*
 * soc15_asic_reset - reset the ASIC via the PSP
 *
 * Flags the engine as hung in the atombios scratch registers, disables
 * PCI bus mastering, saves PCI config space, asks the PSP to reset the
 * GPU, restores config space, then polls the NBIO memsize register
 * until it reads something other than 0xffffffff (i.e. the ASIC is
 * responding again) or adev->usec_timeout expires.  Always returns 0.
 */
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	psp_gpu_reset(adev);

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return 0;
}
  350. /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
  351. u32 cntl_reg, u32 status_reg)
  352. {
  353. return 0;
  354. }*/
  355. static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
  356. {
  357. /*int r;
  358. r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
  359. if (r)
  360. return r;
  361. r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
  362. */
  363. return 0;
  364. }
  365. static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
  366. {
  367. /* todo */
  368. return 0;
  369. }
  370. static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
  371. {
  372. if (pci_is_root_bus(adev->pdev->bus))
  373. return;
  374. if (amdgpu_pcie_gen2 == 0)
  375. return;
  376. if (adev->flags & AMD_IS_APU)
  377. return;
  378. if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
  379. CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
  380. return;
  381. /* todo */
  382. }
  383. static void soc15_program_aspm(struct amdgpu_device *adev)
  384. {
  385. if (amdgpu_aspm == 0)
  386. return;
  387. /* todo */
  388. }
  389. static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
  390. bool enable)
  391. {
  392. adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
  393. adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
  394. }
/* The "common" IP block implemented by this file, registered first for
 * every SOC15 ASIC in soc15_set_ip_blocks(). */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
/* Read the silicon revision id through the NBIO block. */
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
/*
 * soc15_set_ip_blocks - register all IP blocks for a SOC15 ASIC
 *
 * Initializes the per-ASIC register bases, selects the NBIO and DF
 * callback tables, caches the silicon revision, installs the SR-IOV
 * virt ops when running as a VF, and adds the IP blocks in bring-up
 * order.  Returns 0 on success, -EINVAL for an unknown asic_type.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	/* APUs (Raven) use NBIO 7.0, Vega20 uses 7.4, other dGPUs 6.1 */
	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		/* Vega20 carries a newer PSP firmware interface */
		if (adev->asic_type == CHIP_VEGA20)
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		/* UVD/VCE are skipped for Vega20 SR-IOV VFs — presumably
		 * not exposed to guests; confirm against virt docs */
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		/* Raven has VCN instead of separate UVD/VCE blocks */
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Flush the HDP (Host Data Path) write cache through the NBIO block. */
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
  491. static void soc15_invalidate_hdp(struct amdgpu_device *adev,
  492. struct amdgpu_ring *ring)
  493. {
  494. if (!ring || !ring->funcs->emit_wreg)
  495. WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
  496. else
  497. amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
  498. HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
  499. }
/* Soft reset is not implemented on SOC15, so every reset request must
 * be serviced with a full ASIC reset. */
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
/* ASIC-level callback table installed as adev->asic_funcs in
 * soc15_common_early_init(). */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
};
/*
 * soc15_common_early_init - early common-IP initialization
 *
 * Installs the indirect register accessors and the ASIC function
 * table, then sets the per-ASIC clock gating (cg_flags), power gating
 * (pg_flags) feature masks and the external revision id.
 * Returns 0 on success, -EINVAL for an unsupported asic_type.
 */
static int soc15_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SOC15 has no SMC register space */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;
	adev->asic_funcs = &soc15_asic_funcs;

	adev->external_rev_id = 0xFF;	/* overwritten per-ASIC below */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		/* three Raven variants distinguished by rev_id / PCI
		 * device id (0x15d8); each gets its own rev offset and
		 * gating feature set */
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x81;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else
			adev->external_rev_id = 0x1;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
  685. static int soc15_common_late_init(void *handle)
  686. {
  687. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  688. if (amdgpu_sriov_vf(adev))
  689. xgpu_ai_mailbox_get_irq(adev);
  690. return 0;
  691. }
  692. static int soc15_common_sw_init(void *handle)
  693. {
  694. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  695. if (amdgpu_sriov_vf(adev))
  696. xgpu_ai_mailbox_add_irq_id(adev);
  697. return 0;
  698. }
/* Software teardown — nothing to free at the common level. */
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
/* Common hardware init: PCIe gen speed and ASPM setup (both currently
 * stubs), NBIO register programming, and opening the doorbell
 * apertures.  Always returns 0. */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}
  716. static int soc15_common_hw_fini(void *handle)
  717. {
  718. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  719. /* disable the doorbell aperture */
  720. soc15_enable_doorbell_aperture(adev, false);
  721. if (amdgpu_sriov_vf(adev))
  722. xgpu_ai_mailbox_put_irq(adev);
  723. return 0;
  724. }
  725. static int soc15_common_suspend(void *handle)
  726. {
  727. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  728. return soc15_common_hw_fini(adev);
  729. }
  730. static int soc15_common_resume(void *handle)
  731. {
  732. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  733. return soc15_common_hw_init(adev);
  734. }
/* The common block tracks no busy state — always reported idle. */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
/* Nothing to wait on — the common block is always idle. */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
/* Soft reset is not implemented for the common block. */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
/*
 * soc15_update_hdp_light_sleep - toggle HDP memory light sleep
 *
 * Vega20 moved the controls into HDP_MEM_POWER_CTRL (separate
 * CTRL_EN/LS_EN bits for the IPH and RC memories — see the local
 * #defines at the top of this file); earlier SOC15 parts use the
 * single LS_ENABLE bit in HDP_MEM_POWER_LS.  The register is only
 * written when the value actually changes.
 */
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}
  774. static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
  775. {
  776. uint32_t def, data;
  777. def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
  778. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
  779. data &= ~(0x01000000 |
  780. 0x02000000 |
  781. 0x04000000 |
  782. 0x08000000 |
  783. 0x10000000 |
  784. 0x20000000 |
  785. 0x40000000 |
  786. 0x80000000);
  787. else
  788. data |= (0x01000000 |
  789. 0x02000000 |
  790. 0x04000000 |
  791. 0x08000000 |
  792. 0x10000000 |
  793. 0x20000000 |
  794. 0x40000000 |
  795. 0x80000000);
  796. if (def != data)
  797. WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
  798. }
  799. static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
  800. {
  801. uint32_t def, data;
  802. def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
  803. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
  804. data |= 1;
  805. else
  806. data &= ~1;
  807. if (def != data)
  808. WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
  809. }
  810. static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
  811. bool enable)
  812. {
  813. uint32_t def, data;
  814. def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
  815. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
  816. data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
  817. CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
  818. else
  819. data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
  820. CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
  821. if (def != data)
  822. WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
  823. }
  824. static int soc15_common_set_clockgating_state(void *handle,
  825. enum amd_clockgating_state state)
  826. {
  827. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  828. if (amdgpu_sriov_vf(adev))
  829. return 0;
  830. switch (adev->asic_type) {
  831. case CHIP_VEGA10:
  832. case CHIP_VEGA12:
  833. case CHIP_VEGA20:
  834. adev->nbio_funcs->update_medium_grain_clock_gating(adev,
  835. state == AMD_CG_STATE_GATE ? true : false);
  836. adev->nbio_funcs->update_medium_grain_light_sleep(adev,
  837. state == AMD_CG_STATE_GATE ? true : false);
  838. soc15_update_hdp_light_sleep(adev,
  839. state == AMD_CG_STATE_GATE ? true : false);
  840. soc15_update_drm_clock_gating(adev,
  841. state == AMD_CG_STATE_GATE ? true : false);
  842. soc15_update_drm_light_sleep(adev,
  843. state == AMD_CG_STATE_GATE ? true : false);
  844. soc15_update_rom_medium_grain_clock_gating(adev,
  845. state == AMD_CG_STATE_GATE ? true : false);
  846. adev->df_funcs->update_medium_grain_clock_gating(adev,
  847. state == AMD_CG_STATE_GATE ? true : false);
  848. break;
  849. case CHIP_RAVEN:
  850. adev->nbio_funcs->update_medium_grain_clock_gating(adev,
  851. state == AMD_CG_STATE_GATE ? true : false);
  852. adev->nbio_funcs->update_medium_grain_light_sleep(adev,
  853. state == AMD_CG_STATE_GATE ? true : false);
  854. soc15_update_hdp_light_sleep(adev,
  855. state == AMD_CG_STATE_GATE ? true : false);
  856. soc15_update_drm_clock_gating(adev,
  857. state == AMD_CG_STATE_GATE ? true : false);
  858. soc15_update_drm_light_sleep(adev,
  859. state == AMD_CG_STATE_GATE ? true : false);
  860. soc15_update_rom_medium_grain_clock_gating(adev,
  861. state == AMD_CG_STATE_GATE ? true : false);
  862. break;
  863. default:
  864. break;
  865. }
  866. return 0;
  867. }
/*
 * Report which clock-gating features are currently active in hardware
 * by OR-ing the corresponding AMD_CG_SUPPORT_* bits into @flags.
 *
 * NOTE(review): under SR-IOV, *flags is zeroed but the function still
 * falls through and reads the registers below — confirm this is the
 * intended VF behavior rather than an early return.
 */
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio_funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG — active when bit 24 is clear (no
	 * symbolic define for the raw 0x01000000 mask) */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS — bit 0 of the light-sleep control */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG — active when the soft-override bit
	 * is clear */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}
  893. static int soc15_common_set_powergating_state(void *handle,
  894. enum amd_powergating_state state)
  895. {
  896. /* todo */
  897. return 0;
  898. }
  899. const struct amd_ip_funcs soc15_common_ip_funcs = {
  900. .name = "soc15_common",
  901. .early_init = soc15_common_early_init,
  902. .late_init = soc15_common_late_init,
  903. .sw_init = soc15_common_sw_init,
  904. .sw_fini = soc15_common_sw_fini,
  905. .hw_init = soc15_common_hw_init,
  906. .hw_fini = soc15_common_hw_fini,
  907. .suspend = soc15_common_suspend,
  908. .resume = soc15_common_resume,
  909. .is_idle = soc15_common_is_idle,
  910. .wait_for_idle = soc15_common_wait_for_idle,
  911. .soft_reset = soc15_common_soft_reset,
  912. .set_clockgating_state = soc15_common_set_clockgating_state,
  913. .set_powergating_state = soc15_common_set_powergating_state,
  914. .get_clockgating_state= soc15_common_get_clockgating_state,
  915. };