cik.c 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554
  1. /*
  2. * Copyright 2012 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Alex Deucher
  23. */
  24. #include <linux/firmware.h>
  25. #include <linux/slab.h>
  26. #include <linux/module.h>
  27. #include "drmP.h"
  28. #include "amdgpu.h"
  29. #include "amdgpu_atombios.h"
  30. #include "amdgpu_ih.h"
  31. #include "amdgpu_uvd.h"
  32. #include "amdgpu_vce.h"
  33. #include "cikd.h"
  34. #include "atom.h"
  35. #include "amd_pcie.h"
  36. #include "cik.h"
  37. #include "gmc_v7_0.h"
  38. #include "cik_ih.h"
  39. #include "dce_v8_0.h"
  40. #include "gfx_v7_0.h"
  41. #include "cik_sdma.h"
  42. #include "uvd_v4_2.h"
  43. #include "vce_v2_0.h"
  44. #include "cik_dpm.h"
  45. #include "uvd/uvd_4_2_d.h"
  46. #include "smu/smu_7_0_1_d.h"
  47. #include "smu/smu_7_0_1_sh_mask.h"
  48. #include "dce/dce_8_0_d.h"
  49. #include "dce/dce_8_0_sh_mask.h"
  50. #include "bif/bif_4_1_d.h"
  51. #include "bif/bif_4_1_sh_mask.h"
  52. #include "gca/gfx_7_2_d.h"
  53. #include "gca/gfx_7_2_enum.h"
  54. #include "gca/gfx_7_2_sh_mask.h"
  55. #include "gmc/gmc_7_1_d.h"
  56. #include "gmc/gmc_7_1_sh_mask.h"
  57. #include "oss/oss_2_0_d.h"
  58. #include "oss/oss_2_0_sh_mask.h"
  59. #include "amdgpu_amdkfd.h"
  60. #include "amdgpu_powerplay.h"
  61. /*
  62. * Indirect registers accessor
  63. */
  64. static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  65. {
  66. unsigned long flags;
  67. u32 r;
  68. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  69. WREG32(mmPCIE_INDEX, reg);
  70. (void)RREG32(mmPCIE_INDEX);
  71. r = RREG32(mmPCIE_DATA);
  72. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  73. return r;
  74. }
  75. static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  76. {
  77. unsigned long flags;
  78. spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  79. WREG32(mmPCIE_INDEX, reg);
  80. (void)RREG32(mmPCIE_INDEX);
  81. WREG32(mmPCIE_DATA, v);
  82. (void)RREG32(mmPCIE_DATA);
  83. spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  84. }
  85. static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg)
  86. {
  87. unsigned long flags;
  88. u32 r;
  89. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  90. WREG32(mmSMC_IND_INDEX_0, (reg));
  91. r = RREG32(mmSMC_IND_DATA_0);
  92. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  93. return r;
  94. }
  95. static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  96. {
  97. unsigned long flags;
  98. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  99. WREG32(mmSMC_IND_INDEX_0, (reg));
  100. WREG32(mmSMC_IND_DATA_0, (v));
  101. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  102. }
  103. static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
  104. {
  105. unsigned long flags;
  106. u32 r;
  107. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  108. WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
  109. r = RREG32(mmUVD_CTX_DATA);
  110. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  111. return r;
  112. }
  113. static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  114. {
  115. unsigned long flags;
  116. spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
  117. WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
  118. WREG32(mmUVD_CTX_DATA, (v));
  119. spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
  120. }
  121. static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
  122. {
  123. unsigned long flags;
  124. u32 r;
  125. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  126. WREG32(mmDIDT_IND_INDEX, (reg));
  127. r = RREG32(mmDIDT_IND_DATA);
  128. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  129. return r;
  130. }
  131. static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  132. {
  133. unsigned long flags;
  134. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  135. WREG32(mmDIDT_IND_INDEX, (reg));
  136. WREG32(mmDIDT_IND_DATA, (v));
  137. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  138. }
/* Bonaire SPM golden settings; rows of three: { register offset, mask, value }
 * — NOTE(review): triple layout assumed from the amdgpu golden-register
 * convention; confirm against the table's consumer. */
static const u32 bonaire_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};
/* Bonaire common golden settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 bonaire_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};
/* Bonaire golden register settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 bonaire_golden_registers[] =
{
	0xcd5, 0x00000333, 0x00000333,
	0xcd4, 0x000c0fc0, 0x00040200,
	0x2684, 0x00010000, 0x00058208,
	0xf000, 0xffff1fff, 0x00140000,
	0xf080, 0xfdfc0fff, 0x00000100,
	0xf08d, 0x40000000, 0x40000200,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0x0002021c, 0x00020200,
	0x31e, 0x00000080, 0x00000000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0xd43, 0x00810000, 0x408af000,
	0x1c0c, 0x31000111, 0x00000011,
	0xbd2, 0x73773777, 0x12010001,
	0x883, 0x00007fb6, 0x0021a1b1,
	0x884, 0x00007fb6, 0x002021b1,
	0x860, 0x00007fb6, 0x00002191,
	0x886, 0x00007fb6, 0x002121b1,
	0x887, 0x00007fb6, 0x002021b1,
	0x877, 0x00007fb6, 0x00002191,
	0x878, 0x00007fb6, 0x00002191,
	0xd8a, 0x0000003f, 0x0000000a,
	0xd8b, 0x0000003f, 0x0000000a,
	0xab9, 0x00073ffe, 0x000022a2,
	0x903, 0x000007ff, 0x00000000,
	0x2285, 0xf000003f, 0x00000007,
	0x22fc, 0x00002001, 0x00000001,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0xf9e, 0x00000001, 0x00000002,
	0x2440, 0x03000000, 0x0362c688,
	0x2300, 0x000000ff, 0x00000001,
	0x390, 0x00001fff, 0x00001fff,
	0x2418, 0x0000007f, 0x00000020,
	0x2542, 0x00010000, 0x00010000,
	0x2b05, 0x000003ff, 0x000000f3,
	0x2b03, 0xffffffff, 0x00001032
};
/* Bonaire MGCG/CGCG (clock-gating) init values; rows of three: { register
 * offset, mask, value } — NOTE(review): layout assumed from the amdgpu
 * convention; confirm against the table's consumer. */
static const u32 bonaire_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0xc0000100,
	0xf0b2, 0xffffffff, 0xc0000100,
	0xf0b1, 0xffffffff, 0xc0000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};
/* Spectre SPM golden settings; rows of three: { register offset, mask, value }
 * — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 spectre_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};
/* Spectre common golden settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 spectre_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};
/* Spectre golden register settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 spectre_golden_registers[] =
{
	0xf000, 0xffff1fff, 0x96940200,
	0xf003, 0xffff0001, 0xff000000,
	0xf080, 0xfffc0fff, 0x00000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0xfffffffc, 0x00020200,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x26df, 0x00ff0000, 0x00fc0000,
	0xbd2, 0x73773777, 0x12010001,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xa0d4, 0x3f3f3fff, 0x00000082,
	0xa0d5, 0x0000003f, 0x00000000,
	0xf9e, 0x00000001, 0x00000002,
	0x244f, 0xffff03df, 0x00000004,
	0x31da, 0x00000008, 0x00000008,
	0x2300, 0x000008ff, 0x00000800,
	0x2542, 0x00010000, 0x00010000,
	0x2b03, 0xffffffff, 0x54763210,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0xc24d, 0xffffffff, 0x00000001
};
/* Spectre MGCG/CGCG (clock-gating) init values; rows of three: { register
 * offset, mask, value } — NOTE(review): layout assumed from the amdgpu
 * convention; confirm against the table's consumer. */
static const u32 spectre_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf02b, 0xffffffff, 0x00010000,
	0xf02c, 0xffffffff, 0x00030002,
	0xf02d, 0xffffffff, 0x00040007,
	0xf02e, 0xffffffff, 0x00060005,
	0xf02f, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};
/* Kalindi SPM golden settings; rows of three: { register offset, mask, value }
 * — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 kalindi_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};
/* Kalindi common golden settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 kalindi_golden_common_registers[] =
{
	0x31dc, 0xffffffff, 0x00000800,
	0x31dd, 0xffffffff, 0x00000800,
	0x31e6, 0xffffffff, 0x00007fbf,
	0x31e7, 0xffffffff, 0x00007faf
};
/* Kalindi golden register settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 kalindi_golden_registers[] =
{
	0xf000, 0xffffdfff, 0x6e944040,
	0x1579, 0xff607fff, 0xfc000100,
	0xf088, 0xff000fff, 0x00000100,
	0xf089, 0xff000fff, 0x00000100,
	0xf080, 0xfffc0fff, 0x00000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x263f, 0xffffffff, 0x00000010,
	0x26df, 0x00ff0000, 0x00fc0000,
	0x200c, 0x00001f0f, 0x0000100a,
	0xbd2, 0x73773777, 0x12010001,
	0x902, 0x000fffff, 0x000c007f,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0x3fff3fff, 0x00ffcfff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x2300, 0x000000ff, 0x00000003,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0x2231, 0x001f3ae3, 0x00000082,
	0x2235, 0x0000001f, 0x00000010,
	0xc24d, 0xffffffff, 0x00000000
};
/* Kalindi MGCG/CGCG (clock-gating) init values; rows of three: { register
 * offset, mask, value } — NOTE(review): layout assumed from the amdgpu
 * convention; confirm against the table's consumer. */
static const u32 kalindi_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffc,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00600100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf000, 0xffffffff, 0x96e00200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};
/* Hawaii SPM golden settings; rows of three: { register offset, mask, value }
 * — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 hawaii_golden_spm_registers[] =
{
	0xc200, 0xe0ffffff, 0xe0000000
};
/* Hawaii common golden settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 hawaii_golden_common_registers[] =
{
	0xc200, 0xffffffff, 0xe0000000,
	0xa0d4, 0xffffffff, 0x3a00161a,
	0xa0d5, 0xffffffff, 0x0000002e,
	0x2684, 0xffffffff, 0x00018208,
	0x263e, 0xffffffff, 0x12011003
};
/* Hawaii golden register settings; rows of three: { register offset, mask,
 * value } — NOTE(review): layout assumed from the amdgpu convention. */
static const u32 hawaii_golden_registers[] =
{
	0xcd5, 0x00000333, 0x00000333,
	0x2684, 0x00010000, 0x00058208,
	0x260c, 0xffffffff, 0x00000000,
	0x260d, 0xf00fffff, 0x00000400,
	0x260e, 0x0002021c, 0x00020200,
	0x31e, 0x00000080, 0x00000000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0xd43, 0x00810000, 0x408af000,
	0x1c0c, 0x31000111, 0x00000011,
	0xbd2, 0x73773777, 0x12010001,
	0x848, 0x0000007f, 0x0000001b,
	0x877, 0x00007fb6, 0x00002191,
	0xd8a, 0x0000003f, 0x0000000a,
	0xd8b, 0x0000003f, 0x0000000a,
	0xab9, 0x00073ffe, 0x000022a2,
	0x903, 0x000007ff, 0x00000000,
	0x22fc, 0x00002001, 0x00000001,
	0x22c9, 0xffffffff, 0x00ffffff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x31dc, 0x00000f00, 0x00000800,
	0x31dd, 0x00000f00, 0x00000800,
	0x31e6, 0x00ffffff, 0x00ff7fbf,
	0x31e7, 0x00ffffff, 0x00ff7faf,
	0x2300, 0x000000ff, 0x00000800,
	0x390, 0x00001fff, 0x00001fff,
	0x2418, 0x0000007f, 0x00000020,
	0x2542, 0x00010000, 0x00010000,
	0x2b80, 0x00100000, 0x000ff07c,
	0x2b05, 0x000003ff, 0x0000000f,
	0x2b04, 0xffffffff, 0x7564fdec,
	0x2b03, 0xffffffff, 0x3120b9a8,
	0x2b02, 0x20000000, 0x0f9c0000
};
/* Hawaii MGCG/CGCG (medium-grain / coarse-grain clock gating) init
 * sequence.  Flat array of { register offset, and-mask, or-value }
 * triples consumed by amdgpu_program_register_sequence() from
 * cik_init_golden_registers().
 */
static const u32 hawaii_mgcg_cgcg_init[] =
{
	0x3108, 0xffffffff, 0xfffffffd,
	0xc200, 0xffffffff, 0xe0000000,
	0xf0a8, 0xffffffff, 0x00000100,
	0xf082, 0xffffffff, 0x00000100,
	0xf0b0, 0xffffffff, 0x00000100,
	0xf0b2, 0xffffffff, 0x00000100,
	0xf0b1, 0xffffffff, 0x00000100,
	0x1579, 0xffffffff, 0x00200100,
	0xf0a0, 0xffffffff, 0x00000100,
	0xf085, 0xffffffff, 0x06000100,
	0xf088, 0xffffffff, 0x00000100,
	0xf086, 0xffffffff, 0x06000100,
	0xf081, 0xffffffff, 0x00000100,
	0xf0b8, 0xffffffff, 0x00000100,
	0xf089, 0xffffffff, 0x00000100,
	0xf080, 0xffffffff, 0x00000100,
	0xf08c, 0xffffffff, 0x00000100,
	0xf08d, 0xffffffff, 0x00000100,
	0xf094, 0xffffffff, 0x00000100,
	0xf095, 0xffffffff, 0x00000100,
	0xf096, 0xffffffff, 0x00000100,
	0xf097, 0xffffffff, 0x00000100,
	0xf098, 0xffffffff, 0x00000100,
	0xf09f, 0xffffffff, 0x00000100,
	0xf09e, 0xffffffff, 0x00000100,
	0xf084, 0xffffffff, 0x06000100,
	0xf0a4, 0xffffffff, 0x00000100,
	0xf09d, 0xffffffff, 0x00000100,
	0xf0ad, 0xffffffff, 0x00000100,
	0xf0ac, 0xffffffff, 0x00000100,
	0xf09c, 0xffffffff, 0x00000100,
	0xc200, 0xffffffff, 0xe0000000,
	/* 0xf008..0xf03e: repeating 5-entry pattern, presumably per-block
	 * gating delay tables — values grouped in runs of
	 * 0x00010000/0x00030002/0x00040007/0x00060005/0x00090008.
	 */
	0xf008, 0xffffffff, 0x00010000,
	0xf009, 0xffffffff, 0x00030002,
	0xf00a, 0xffffffff, 0x00040007,
	0xf00b, 0xffffffff, 0x00060005,
	0xf00c, 0xffffffff, 0x00090008,
	0xf00d, 0xffffffff, 0x00010000,
	0xf00e, 0xffffffff, 0x00030002,
	0xf00f, 0xffffffff, 0x00040007,
	0xf010, 0xffffffff, 0x00060005,
	0xf011, 0xffffffff, 0x00090008,
	0xf012, 0xffffffff, 0x00010000,
	0xf013, 0xffffffff, 0x00030002,
	0xf014, 0xffffffff, 0x00040007,
	0xf015, 0xffffffff, 0x00060005,
	0xf016, 0xffffffff, 0x00090008,
	0xf017, 0xffffffff, 0x00010000,
	0xf018, 0xffffffff, 0x00030002,
	0xf019, 0xffffffff, 0x00040007,
	0xf01a, 0xffffffff, 0x00060005,
	0xf01b, 0xffffffff, 0x00090008,
	0xf01c, 0xffffffff, 0x00010000,
	0xf01d, 0xffffffff, 0x00030002,
	0xf01e, 0xffffffff, 0x00040007,
	0xf01f, 0xffffffff, 0x00060005,
	0xf020, 0xffffffff, 0x00090008,
	0xf021, 0xffffffff, 0x00010000,
	0xf022, 0xffffffff, 0x00030002,
	0xf023, 0xffffffff, 0x00040007,
	0xf024, 0xffffffff, 0x00060005,
	0xf025, 0xffffffff, 0x00090008,
	0xf026, 0xffffffff, 0x00010000,
	0xf027, 0xffffffff, 0x00030002,
	0xf028, 0xffffffff, 0x00040007,
	0xf029, 0xffffffff, 0x00060005,
	0xf02a, 0xffffffff, 0x00090008,
	0xf02b, 0xffffffff, 0x00010000,
	0xf02c, 0xffffffff, 0x00030002,
	0xf02d, 0xffffffff, 0x00040007,
	0xf02e, 0xffffffff, 0x00060005,
	0xf02f, 0xffffffff, 0x00090008,
	0xf030, 0xffffffff, 0x00010000,
	0xf031, 0xffffffff, 0x00030002,
	0xf032, 0xffffffff, 0x00040007,
	0xf033, 0xffffffff, 0x00060005,
	0xf034, 0xffffffff, 0x00090008,
	0xf035, 0xffffffff, 0x00010000,
	0xf036, 0xffffffff, 0x00030002,
	0xf037, 0xffffffff, 0x00040007,
	0xf038, 0xffffffff, 0x00060005,
	0xf039, 0xffffffff, 0x00090008,
	0xf03a, 0xffffffff, 0x00010000,
	0xf03b, 0xffffffff, 0x00030002,
	0xf03c, 0xffffffff, 0x00040007,
	0xf03d, 0xffffffff, 0x00060005,
	0xf03e, 0xffffffff, 0x00090008,
	0x30c6, 0xffffffff, 0x00020200,
	0xcd4, 0xffffffff, 0x00000200,
	0x570, 0xffffffff, 0x00000400,
	0x157a, 0xffffffff, 0x00000000,
	0xbd4, 0xffffffff, 0x00000902,
	0xf000, 0xffffffff, 0x96940200,
	0x21c2, 0xffffffff, 0x00900100,
	0x3109, 0xffffffff, 0x0020003f,
	0xe, 0xffffffff, 0x0140001c,
	0xf, 0x000f0000, 0x000f0000,
	0x88, 0xffffffff, 0xc060000c,
	0x89, 0xc0000fff, 0x00000100,
	0x3e4, 0xffffffff, 0x00000100,
	0x3e6, 0x00000101, 0x00000000,
	0x82a, 0xffffffff, 0x00000104,
	0x1579, 0xff000fff, 0x00000100,
	0xc33, 0xc0000fff, 0x00000104,
	0x3079, 0x00000001, 0x00000001,
	0x3403, 0xff000ff0, 0x00000100,
	0x3603, 0xff000ff0, 0x00000100
};
/* Godavari (Mullins) golden register settings.  Flat array of
 * { register offset, and-mask, or-value } triples programmed by
 * amdgpu_program_register_sequence() for CHIP_MULLINS in
 * cik_init_golden_registers().
 */
static const u32 godavari_golden_registers[] =
{
	0x1579, 0xff607fff, 0xfc000100,
	0x1bb6, 0x00010101, 0x00010000,
	0x260c, 0xffffffff, 0x00000000,
	0x260c0, 0xf00fffff, 0x00000400,
	0x184c, 0xffffffff, 0x00010000,
	0x16ec, 0x000000f0, 0x00000070,
	0x16f0, 0xf0311fff, 0x80300000,
	0x263e, 0x73773777, 0x12010001,
	0x263f, 0xffffffff, 0x00000010,
	0x200c, 0x00001f0f, 0x0000100a,
	0xbd2, 0x73773777, 0x12010001,
	0x902, 0x000fffff, 0x000c007f,
	0x2285, 0xf000003f, 0x00000007,
	0x22c9, 0xffffffff, 0x00ff0fff,
	0xc281, 0x0000ff0f, 0x00000000,
	0xa293, 0x07ffffff, 0x06000000,
	0x136, 0x00000fff, 0x00000100,
	0x3405, 0x00010000, 0x00810001,
	0x3605, 0x00010000, 0x00810001,
	0xf9e, 0x00000001, 0x00000002,
	0x31da, 0x00000008, 0x00000008,
	0x31dc, 0x00000f00, 0x00000800,
	0x31dd, 0x00000f00, 0x00000800,
	0x31e6, 0x00ffffff, 0x00ff7fbf,
	0x31e7, 0x00ffffff, 0x00ff7faf,
	0x2300, 0x000000ff, 0x00000001,
	0x853e, 0x01ff01ff, 0x00000002,
	0x8526, 0x007ff800, 0x00200000,
	0x8057, 0xffffffff, 0x00000f40,
	0x2231, 0x001f3ae3, 0x00000082,
	0x2235, 0x0000001f, 0x00000010,
	0xc24d, 0xffffffff, 0x00000000
};
  706. static void cik_init_golden_registers(struct amdgpu_device *adev)
  707. {
  708. /* Some of the registers might be dependent on GRBM_GFX_INDEX */
  709. mutex_lock(&adev->grbm_idx_mutex);
  710. switch (adev->asic_type) {
  711. case CHIP_BONAIRE:
  712. amdgpu_program_register_sequence(adev,
  713. bonaire_mgcg_cgcg_init,
  714. (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
  715. amdgpu_program_register_sequence(adev,
  716. bonaire_golden_registers,
  717. (const u32)ARRAY_SIZE(bonaire_golden_registers));
  718. amdgpu_program_register_sequence(adev,
  719. bonaire_golden_common_registers,
  720. (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
  721. amdgpu_program_register_sequence(adev,
  722. bonaire_golden_spm_registers,
  723. (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
  724. break;
  725. case CHIP_KABINI:
  726. amdgpu_program_register_sequence(adev,
  727. kalindi_mgcg_cgcg_init,
  728. (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
  729. amdgpu_program_register_sequence(adev,
  730. kalindi_golden_registers,
  731. (const u32)ARRAY_SIZE(kalindi_golden_registers));
  732. amdgpu_program_register_sequence(adev,
  733. kalindi_golden_common_registers,
  734. (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
  735. amdgpu_program_register_sequence(adev,
  736. kalindi_golden_spm_registers,
  737. (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
  738. break;
  739. case CHIP_MULLINS:
  740. amdgpu_program_register_sequence(adev,
  741. kalindi_mgcg_cgcg_init,
  742. (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
  743. amdgpu_program_register_sequence(adev,
  744. godavari_golden_registers,
  745. (const u32)ARRAY_SIZE(godavari_golden_registers));
  746. amdgpu_program_register_sequence(adev,
  747. kalindi_golden_common_registers,
  748. (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
  749. amdgpu_program_register_sequence(adev,
  750. kalindi_golden_spm_registers,
  751. (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
  752. break;
  753. case CHIP_KAVERI:
  754. amdgpu_program_register_sequence(adev,
  755. spectre_mgcg_cgcg_init,
  756. (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
  757. amdgpu_program_register_sequence(adev,
  758. spectre_golden_registers,
  759. (const u32)ARRAY_SIZE(spectre_golden_registers));
  760. amdgpu_program_register_sequence(adev,
  761. spectre_golden_common_registers,
  762. (const u32)ARRAY_SIZE(spectre_golden_common_registers));
  763. amdgpu_program_register_sequence(adev,
  764. spectre_golden_spm_registers,
  765. (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
  766. break;
  767. case CHIP_HAWAII:
  768. amdgpu_program_register_sequence(adev,
  769. hawaii_mgcg_cgcg_init,
  770. (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
  771. amdgpu_program_register_sequence(adev,
  772. hawaii_golden_registers,
  773. (const u32)ARRAY_SIZE(hawaii_golden_registers));
  774. amdgpu_program_register_sequence(adev,
  775. hawaii_golden_common_registers,
  776. (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
  777. amdgpu_program_register_sequence(adev,
  778. hawaii_golden_spm_registers,
  779. (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
  780. break;
  781. default:
  782. break;
  783. }
  784. mutex_unlock(&adev->grbm_idx_mutex);
  785. }
  786. /**
  787. * cik_get_xclk - get the xclk
  788. *
  789. * @adev: amdgpu_device pointer
  790. *
  791. * Returns the reference clock used by the gfx engine
  792. * (CIK).
  793. */
  794. static u32 cik_get_xclk(struct amdgpu_device *adev)
  795. {
  796. u32 reference_clock = adev->clock.spll.reference_freq;
  797. if (adev->flags & AMD_IS_APU) {
  798. if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
  799. return reference_clock / 2;
  800. } else {
  801. if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
  802. return reference_clock / 4;
  803. }
  804. return reference_clock;
  805. }
  806. /**
  807. * cik_srbm_select - select specific register instances
  808. *
  809. * @adev: amdgpu_device pointer
  810. * @me: selected ME (micro engine)
  811. * @pipe: pipe
  812. * @queue: queue
  813. * @vmid: VMID
  814. *
  815. * Switches the currently active registers instances. Some
  816. * registers are instanced per VMID, others are instanced per
  817. * me/pipe/queue combination.
  818. */
  819. void cik_srbm_select(struct amdgpu_device *adev,
  820. u32 me, u32 pipe, u32 queue, u32 vmid)
  821. {
  822. u32 srbm_gfx_cntl =
  823. (((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)|
  824. ((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)|
  825. ((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)|
  826. ((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
  827. WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
  828. }
  829. static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
  830. {
  831. uint32_t tmp;
  832. tmp = RREG32(mmCONFIG_CNTL);
  833. if (state == false)
  834. tmp |= CONFIG_CNTL__VGA_DIS_MASK;
  835. else
  836. tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
  837. WREG32(mmCONFIG_CNTL, tmp);
  838. }
/*
 * cik_read_disabled_bios - read the vbios when ROM access is disabled
 *
 * Temporarily enables the BIOS ROM in BUS_CNTL, disables VGA mode on
 * both display controllers, and overrides the ROM serial clock, then
 * attempts to read the vbios via amdgpu_read_bios().  All touched
 * registers are restored to their saved values afterwards.
 * Returns true if a vbios image was successfully read.
 */
static bool cik_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save current state of everything we are about to modify */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		/* NOTE(review): D2VGA_CONTROL is cleared with the D1VGA bit
		 * definitions; presumably the D1/D2 field layouts are
		 * identical — confirm against the register headers.
		 */
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
  879. static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
  880. u8 *bios, u32 length_bytes)
  881. {
  882. u32 *dw_ptr;
  883. unsigned long flags;
  884. u32 i, length_dw;
  885. if (bios == NULL)
  886. return false;
  887. if (length_bytes == 0)
  888. return false;
  889. /* APU vbios image is part of sbios image */
  890. if (adev->flags & AMD_IS_APU)
  891. return false;
  892. dw_ptr = (u32 *)bios;
  893. length_dw = ALIGN(length_bytes, 4) / 4;
  894. /* take the smc lock since we are using the smc index */
  895. spin_lock_irqsave(&adev->smc_idx_lock, flags);
  896. /* set rom index to 0 */
  897. WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
  898. WREG32(mmSMC_IND_DATA_0, 0);
  899. /* set index to data for continous read */
  900. WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
  901. for (i = 0; i < length_dw; i++)
  902. dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
  903. spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
  904. return true;
  905. }
/* Whitelist of registers userspace may read via cik_read_register().
 * Fields (positional): { reg_offset, untouched, grbm_indexed }; entries
 * without a third initializer default grbm_indexed to false, and
 * grbm_indexed entries are read through cik_read_indexed_register()
 * under a GRBM se/sh selection.
 */
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
  964. static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
  965. u32 se_num, u32 sh_num,
  966. u32 reg_offset)
  967. {
  968. uint32_t val;
  969. mutex_lock(&adev->grbm_idx_mutex);
  970. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  971. gfx_v7_0_select_se_sh(adev, se_num, sh_num);
  972. val = RREG32(reg_offset);
  973. if (se_num != 0xffffffff || sh_num != 0xffffffff)
  974. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  975. mutex_unlock(&adev->grbm_idx_mutex);
  976. return val;
  977. }
  978. static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
  979. u32 sh_num, u32 reg_offset, u32 *value)
  980. {
  981. uint32_t i;
  982. *value = 0;
  983. for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
  984. if (reg_offset != cik_allowed_read_registers[i].reg_offset)
  985. continue;
  986. if (!cik_allowed_read_registers[i].untouched)
  987. *value = cik_allowed_read_registers[i].grbm_indexed ?
  988. cik_read_indexed_register(adev, se_num,
  989. sh_num, reg_offset) :
  990. RREG32(reg_offset);
  991. return 0;
  992. }
  993. return -EINVAL;
  994. }
/*
 * cik_print_gpu_status_regs - dump the main GPU status registers
 * (GRBM/SRBM, both SDMA engines and the CP/CPF/CPC blocks) to the
 * kernel log; used for diagnostics around GPU soft reset.
 */
static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
{
	dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
		 RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		 RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
		 RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
	dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
	dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
		 RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}
/**
 * cik_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by cik_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS: any busy gfx pipeline block -> reset GFX */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		reset_mask |= AMDGPU_RESET_GFX;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_RLC;

	/* SDMA0_STATUS_REG: IDLE is active-high, so !IDLE means busy */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA;

	/* SDMA1_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(mmSRBM_STATUS2);
	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA;

	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA1;

	/* SRBM_STATUS: IH, semaphores, GRBM requests, VM and MC clients */
	tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_IH;

	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_SEM;

	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		reset_mask |= AMDGPU_RESET_GRBM;

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_VMC;

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_MC;

	if (amdgpu_display_is_display_hung(adev))
		reset_mask |= AMDGPU_RESET_DISPLAY;

	/* Skip MC reset as it's mostly likely not hung, just busy */
	if (reset_mask & AMDGPU_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~AMDGPU_RESET_MC;
	}

	return reset_mask;
}
/**
 * cik_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask: halts the affected
 * engines, stops MC access, pulses the GRBM/SRBM soft-reset bits for
 * the selected blocks, then resumes the MC.  Status registers are
 * dumped before and after for diagnostics.
 */
static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
	struct amdgpu_mode_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	cik_print_gpu_status_regs(adev);
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* disable CG/PG */

	/* stop the rlc */
	gfx_v7_0_rlc_stop(adev);

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	if (reset_mask & AMDGPU_RESET_DMA) {
		/* sdma0: halt the microcode engine */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & AMDGPU_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}

	/* quiesce memory traffic before touching the reset bits */
	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
	}

	/* map the high-level reset mask onto GRBM/SRBM soft-reset bits */
	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
		grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

	if (reset_mask & AMDGPU_RESET_CP) {
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;

		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
	}

	if (reset_mask & AMDGPU_RESET_DMA)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;

	if (reset_mask & AMDGPU_RESET_DMA1)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;

	if (reset_mask & AMDGPU_RESET_DISPLAY)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (reset_mask & AMDGPU_RESET_RLC)
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

	if (reset_mask & AMDGPU_RESET_SEM)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;

	if (reset_mask & AMDGPU_RESET_IH)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (reset_mask & AMDGPU_RESET_GRBM)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

	if (reset_mask & AMDGPU_RESET_VMC)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;

	if (!(adev->flags & AMD_IS_APU)) {
		if (reset_mask & AMDGPU_RESET_MC)
			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
	}

	if (grbm_soft_reset) {
		/* assert reset, read back to post the write, hold 50us,
		 * then deassert */
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	gmc_v7_0_mc_resume(adev, &save);
	udelay(50);

	cik_print_gpu_status_regs(adev);
}
/* GMCON register state saved by kv_save_regs_for_reset() across an APU
 * pci config reset and written back by kv_restore_regs_for_reset().
 */
struct kv_reset_save_regs {
	u32 gmcon_reng_execute;	/* mmGMCON_RENG_EXECUTE */
	u32 gmcon_misc;		/* mmGMCON_MISC */
	u32 gmcon_misc3;	/* mmGMCON_MISC3 */
};
/*
 * kv_save_regs_for_reset - save GMCON state before an APU reset
 *
 * Saves RENG_EXECUTE, MISC and MISC3 into @save, then clears the
 * register-engine execute-on-power-up bit and the execute-on-update /
 * stutter-enable bits so the save/restore engine stays quiet across
 * the reset.  Paired with kv_restore_regs_for_reset().
 */
static void kv_save_regs_for_reset(struct amdgpu_device *adev,
				   struct kv_reset_save_regs *save)
{
	save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE);
	save->gmcon_misc = RREG32(mmGMCON_MISC);
	save->gmcon_misc3 = RREG32(mmGMCON_MISC3);

	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute &
	       ~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK);
	WREG32(mmGMCON_MISC, save->gmcon_misc &
	       ~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK |
		 GMCON_MISC__STCTRL_STUTTER_EN_MASK));
}
  1214. static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
  1215. struct kv_reset_save_regs *save)
  1216. {
  1217. int i;
  1218. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1219. WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff);
  1220. for (i = 0; i < 5; i++)
  1221. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1222. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1223. WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff);
  1224. for (i = 0; i < 5; i++)
  1225. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1226. WREG32(mmGMCON_PGFSM_WRITE, 0x210000);
  1227. WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff);
  1228. for (i = 0; i < 5; i++)
  1229. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1230. WREG32(mmGMCON_PGFSM_WRITE, 0x21003);
  1231. WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff);
  1232. for (i = 0; i < 5; i++)
  1233. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1234. WREG32(mmGMCON_PGFSM_WRITE, 0x2b00);
  1235. WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff);
  1236. for (i = 0; i < 5; i++)
  1237. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1238. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1239. WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff);
  1240. for (i = 0; i < 5; i++)
  1241. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1242. WREG32(mmGMCON_PGFSM_WRITE, 0x420000);
  1243. WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff);
  1244. for (i = 0; i < 5; i++)
  1245. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1246. WREG32(mmGMCON_PGFSM_WRITE, 0x120202);
  1247. WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff);
  1248. for (i = 0; i < 5; i++)
  1249. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1250. WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36);
  1251. WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff);
  1252. for (i = 0; i < 5; i++)
  1253. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1254. WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e);
  1255. WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff);
  1256. for (i = 0; i < 5; i++)
  1257. WREG32(mmGMCON_PGFSM_WRITE, 0);
  1258. WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332);
  1259. WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff);
  1260. WREG32(mmGMCON_MISC3, save->gmcon_misc3);
  1261. WREG32(mmGMCON_MISC, save->gmcon_misc);
  1262. WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
  1263. }
/*
 * cik_gpu_pci_config_reset - full asic reset via pci config space
 *
 * Halts all engines, quiesces the MC, saves APU-specific GMCON state,
 * disables bus mastering and triggers a pci config reset, then waits
 * for the asic to come back (CONFIG_MEMSIZE readable) and restores
 * the GMCON state on APUs.
 */
static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	struct kv_reset_save_regs kv_save = { 0 };
	u32 tmp, i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* disable cg/pg */

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
	       CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL,
	       CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	/* sdma0: halt the microcode engine */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp |= SDMA0_F32_CNTL__HALT_MASK;
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	/* sdma1 */
	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp |= SDMA0_F32_CNTL__HALT_MASK;
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	gfx_v7_0_rlc_stop(adev);

	udelay(50);

	/* disable mem access */
	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
	}

	if (adev->flags & AMD_IS_APU)
		kv_save_regs_for_reset(adev, &kv_save);

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset; CONFIG_MEMSIZE reads as
	 * all-ones while the asic is still in reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}

	/* does asic init need to be run first??? */
	if (adev->flags & AMD_IS_APU)
		kv_restore_regs_for_reset(adev, &kv_save);
}
  1312. static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
  1313. {
  1314. u32 tmp = RREG32(mmBIOS_SCRATCH_3);
  1315. if (hung)
  1316. tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
  1317. else
  1318. tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
  1319. WREG32(mmBIOS_SCRATCH_3, tmp);
  1320. }
/**
 * cik_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them: first a soft reset, then (if still hung and
 * amdgpu_hard_reset is set) a pci config reset.  The bios scratch
 * hung flag is set while a reset is in progress and cleared once
 * the check mask comes back clean.
 * Returns 0 for success.
 */
static int cik_asic_reset(struct amdgpu_device *adev)
{
	u32 reset_mask;

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	if (reset_mask)
		cik_set_bios_scratch_engine_hung(adev, true);

	/* try soft reset */
	cik_gpu_soft_reset(adev, reset_mask);

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	/* try pci config reset */
	if (reset_mask && amdgpu_hard_reset)
		cik_gpu_pci_config_reset(adev);

	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);

	if (!reset_mask)
		cik_set_bios_scratch_engine_hung(adev, false);

	return 0;
}
/*
 * cik_set_uvd_clock - program one UVD clock (vclk or dclk)
 *
 * @adev: amdgpu_device pointer
 * @clock: target clock in 10 kHz units (as passed to atombios)
 * @cntl_reg: SMC control register (ixCG_VCLK_CNTL or ixCG_DCLK_CNTL)
 * @status_reg: matching SMC status register
 *
 * Looks up the divider via atombios, programs the post divider into
 * @cntl_reg and polls @status_reg (up to ~1s) until the clock reports
 * a valid status.
 * NOTE(review): the CG_DCLK_CNTL/CG_DCLK_STATUS bit masks are applied
 * to both the VCLK and DCLK registers — presumably the bit layouts
 * are identical; confirm against the register headers.
 * Returns 0 on success, atombios error, or -ETIMEDOUT.
 */
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* wait up to 100 * 10ms for the clock status to assert */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
  1372. static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
  1373. {
  1374. int r = 0;
  1375. r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
  1376. if (r)
  1377. return r;
  1378. r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
  1379. return r;
  1380. }
  1381. static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
  1382. {
  1383. int r, i;
  1384. struct atom_clock_dividers dividers;
  1385. u32 tmp;
  1386. r = amdgpu_atombios_get_clock_dividers(adev,
  1387. COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
  1388. ecclk, false, &dividers);
  1389. if (r)
  1390. return r;
  1391. for (i = 0; i < 100; i++) {
  1392. if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
  1393. break;
  1394. mdelay(10);
  1395. }
  1396. if (i == 100)
  1397. return -ETIMEDOUT;
  1398. tmp = RREG32_SMC(ixCG_ECLK_CNTL);
  1399. tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
  1400. CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
  1401. tmp |= dividers.post_divider;
  1402. WREG32_SMC(ixCG_ECLK_CNTL, tmp);
  1403. for (i = 0; i < 100; i++) {
  1404. if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
  1405. break;
  1406. mdelay(10);
  1407. }
  1408. if (i == 100)
  1409. return -ETIMEDOUT;
  1410. return 0;
  1411. }
/*
 * cik_pcie_gen3_enable - bring the PCIe link up to gen2/gen3 speed
 * @adev: amdgpu device pointer
 *
 * If the platform and board support it, retrains the PCIe link to the
 * highest supported data rate (gen3 preferred over gen2).  For gen3 a
 * quiesce/redo-equalization retry loop is run first, saving and
 * restoring the bridge and GPU LNKCTL/LNKCTL2 state around each retry.
 * Silently returns when on a root bus, on an APU, when disabled via the
 * amdgpu.pcie_gen2 module parameter, or when no faster rate is supported.
 */
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
	struct pci_dev *root = adev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, current_data_rate;
	int i;
	u16 tmp16;

	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* current link rate: 0 = gen1, 1 = gen2, 2 = gen3 */
	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
	}

	/* both ends need a PCIe capability to proceed */
	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(adev->pdev);
	if (!gpu_pos)
		return;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			/* save current link control and force HW autonomous width disable */
			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			/* widen the link back to maximum if it negotiated narrower */
			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
				PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
			current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK)
				>> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
					tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK |
						 PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
					tmp |= (max_lw <<
						PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
					tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK |
					PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK |
					PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
					WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			/* equalization retry loop: quiesce, redo EQ, restore link regs */
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl: restore the saved HAWD bit on both ends */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2: restore bits 4 and 9..11 — presumably the
				 * Enter Compliance and Compliance Preset/De-emphasis
				 * fields of LNKCTL2; NOTE(review): confirm against the
				 * PCIe spec before replacing the magic masks.
				 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
				tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK |
		PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
	speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);

	/* program the Target Link Speed field (low 4 bits of LNKCTL2) */
	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		tmp16 |= 3; /* gen3 */
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	/* kick off the speed change and wait for the controller to ack it */
	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < adev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
		if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
			break;
		udelay(1);
	}
}
  1544. static void cik_program_aspm(struct amdgpu_device *adev)
  1545. {
  1546. u32 data, orig;
  1547. bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
  1548. bool disable_clkreq = false;
  1549. if (amdgpu_aspm == 0)
  1550. return;
  1551. /* XXX double check APUs */
  1552. if (adev->flags & AMD_IS_APU)
  1553. return;
  1554. orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
  1555. data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
  1556. data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
  1557. PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
  1558. if (orig != data)
  1559. WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
  1560. orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
  1561. data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
  1562. if (orig != data)
  1563. WREG32_PCIE(ixPCIE_LC_CNTL3, data);
  1564. orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
  1565. data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
  1566. if (orig != data)
  1567. WREG32_PCIE(ixPCIE_P_CNTL, data);
  1568. orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
  1569. data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
  1570. PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
  1571. data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
  1572. if (!disable_l0s)
  1573. data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
  1574. if (!disable_l1) {
  1575. data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
  1576. data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
  1577. if (orig != data)
  1578. WREG32_PCIE(ixPCIE_LC_CNTL, data);
  1579. if (!disable_plloff_in_l1) {
  1580. bool clk_req_support;
  1581. orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0);
  1582. data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
  1583. PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
  1584. data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
  1585. (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
  1586. if (orig != data)
  1587. WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data);
  1588. orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1);
  1589. data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
  1590. PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
  1591. data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
  1592. (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
  1593. if (orig != data)
  1594. WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data);
  1595. orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0);
  1596. data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
  1597. PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
  1598. data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
  1599. (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
  1600. if (orig != data)
  1601. WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data);
  1602. orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1);
  1603. data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
  1604. PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
  1605. data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
  1606. (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
  1607. if (orig != data)
  1608. WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data);
  1609. orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
  1610. data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
  1611. data |= ~(3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
  1612. if (orig != data)
  1613. WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
  1614. if (!disable_clkreq) {
  1615. struct pci_dev *root = adev->pdev->bus->self;
  1616. u32 lnkcap;
  1617. clk_req_support = false;
  1618. pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
  1619. if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
  1620. clk_req_support = true;
  1621. } else {
  1622. clk_req_support = false;
  1623. }
  1624. if (clk_req_support) {
  1625. orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
  1626. data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
  1627. PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
  1628. if (orig != data)
  1629. WREG32_PCIE(ixPCIE_LC_CNTL2, data);
  1630. orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
  1631. data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK |
  1632. THM_CLK_CNTL__TMON_CLK_SEL_MASK);
  1633. data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
  1634. (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
  1635. if (orig != data)
  1636. WREG32_SMC(ixTHM_CLK_CNTL, data);
  1637. orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
  1638. data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
  1639. MISC_CLK_CTRL__ZCLK_SEL_MASK);
  1640. data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
  1641. (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
  1642. if (orig != data)
  1643. WREG32_SMC(ixMISC_CLK_CTRL, data);
  1644. orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
  1645. data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
  1646. if (orig != data)
  1647. WREG32_SMC(ixCG_CLKPIN_CNTL, data);
  1648. orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
  1649. data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
  1650. if (orig != data)
  1651. WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
  1652. orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
  1653. data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
  1654. data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
  1655. if (orig != data)
  1656. WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
  1657. }
  1658. }
  1659. } else {
  1660. if (orig != data)
  1661. WREG32_PCIE(ixPCIE_LC_CNTL, data);
  1662. }
  1663. orig = data = RREG32_PCIE(ixPCIE_CNTL2);
  1664. data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
  1665. PCIE_CNTL2__MST_MEM_LS_EN_MASK |
  1666. PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
  1667. if (orig != data)
  1668. WREG32_PCIE(ixPCIE_CNTL2, data);
  1669. if (!disable_l0s) {
  1670. data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
  1671. if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) ==
  1672. PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
  1673. data = RREG32_PCIE(ixPCIE_LC_STATUS1);
  1674. if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) &&
  1675. (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
  1676. orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
  1677. data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
  1678. if (orig != data)
  1679. WREG32_PCIE(ixPCIE_LC_CNTL, data);
  1680. }
  1681. }
  1682. }
  1683. }
  1684. static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
  1685. {
  1686. return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
  1687. >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
  1688. }
/*
 * IP block table for Bonaire: DCE 8.2, GFX 7.2, plus the common CIK
 * blocks.  The init/fini ordering of the driver follows this array
 * order, so entries must not be rearranged.
 */
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 2,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};
/*
 * IP block table for Hawaii: DCE 8.5, GFX 7.3, plus the common CIK
 * blocks.  Array order defines the driver's init/fini order.
 */
static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 5,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 3,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};
/*
 * IP block table for Kabini (APU): DCE 8.3, GFX 7.2, plus the common
 * CIK blocks.  Array order defines the driver's init/fini order.
 */
static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};
/*
 * IP block table for Mullins (APU): same block versions as Kabini
 * (DCE 8.3, GFX 7.2).  Array order defines the driver's init/fini order.
 */
static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 3,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 2,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};
/*
 * IP block table for Kaveri (APU): DCE 8.1, GFX 7.1, plus the common
 * CIK blocks.  Array order defines the driver's init/fini order.
 */
static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
	/* ORDER MATTERS! */
	{
		.type = AMD_IP_BLOCK_TYPE_COMMON,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_common_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &gmc_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_IH,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_ih_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SMC,
		.major = 7,
		.minor = 0,
		.rev = 0,
		.funcs = &amdgpu_pp_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_DCE,
		.major = 8,
		.minor = 1,
		.rev = 0,
		.funcs = &dce_v8_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_GFX,
		.major = 7,
		.minor = 1,
		.rev = 0,
		.funcs = &gfx_v7_0_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_SDMA,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &cik_sdma_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 4,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v4_2_ip_funcs,
	},
	{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
	},
};
  2024. int cik_set_ip_blocks(struct amdgpu_device *adev)
  2025. {
  2026. switch (adev->asic_type) {
  2027. case CHIP_BONAIRE:
  2028. adev->ip_blocks = bonaire_ip_blocks;
  2029. adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
  2030. break;
  2031. case CHIP_HAWAII:
  2032. adev->ip_blocks = hawaii_ip_blocks;
  2033. adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
  2034. break;
  2035. case CHIP_KAVERI:
  2036. adev->ip_blocks = kaveri_ip_blocks;
  2037. adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
  2038. break;
  2039. case CHIP_KABINI:
  2040. adev->ip_blocks = kabini_ip_blocks;
  2041. adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
  2042. break;
  2043. case CHIP_MULLINS:
  2044. adev->ip_blocks = mullins_ip_blocks;
  2045. adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
  2046. break;
  2047. default:
  2048. /* FIXME: not supported yet */
  2049. return -EINVAL;
  2050. }
  2051. return 0;
  2052. }
/* ASIC-level callback table shared by all CIK parts. */
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
	.read_disabled_bios = &cik_read_disabled_bios,
	.read_bios_from_rom = &cik_read_bios_from_rom,
	.read_register = &cik_read_register,
	.reset = &cik_asic_reset,
	.set_vga_state = &cik_vga_set_state,
	.get_xclk = &cik_get_xclk,
	.set_uvd_clocks = &cik_set_uvd_clocks,
	.set_vce_clocks = &cik_set_vce_clocks,
	.get_cu_info = &gfx_v7_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
};
/*
 * cik_common_early_init - early init for the CIK "common" IP block
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Hooks up the indirect register accessors and ASIC callbacks, then
 * sets the clock/power-gating support flags and the external revision
 * id for each supported ASIC.  Finishes by caching the PCIe link caps.
 * Returns 0 on success, -EINVAL for an unknown ASIC type.
 */
static int cik_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* indirect register access helpers for SMC/PCIE/UVD/DIDT spaces */
	adev->smc_rreg = &cik_smc_rreg;
	adev->smc_wreg = &cik_smc_wreg;
	adev->pcie_rreg = &cik_pcie_rreg;
	adev->pcie_wreg = &cik_pcie_wreg;
	adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
	adev->didt_rreg = &cik_didt_rreg;
	adev->didt_wreg = &cik_didt_wreg;

	adev->asic_funcs = &cik_asic_funcs;

	adev->has_uvd = true;

	adev->rev_id = cik_get_rev_id(adev);
	/* 0xFF = "not set"; overwritten per-ASIC below */
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_MC_LS |
			AMDGPU_CG_SUPPORT_MC_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_HAWAII:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_MC_LS |
			AMDGPU_CG_SUPPORT_MC_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x28;
		break;
	case CHIP_KAVERI:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
			/*AMDGPU_PG_SUPPORT_GFX_PG |
			  AMDGPU_PG_SUPPORT_GFX_SMG |
			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
			AMDGPU_PG_SUPPORT_UVD |
			/*AMDGPU_PG_SUPPORT_VCE |
			  AMDGPU_PG_SUPPORT_CP |
			  AMDGPU_PG_SUPPORT_GDS |
			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
			  AMDGPU_PG_SUPPORT_ACP |
			  AMDGPU_PG_SUPPORT_SAMU |*/
			0;
		/* these device ids report a different external revision */
		if (adev->pdev->device == 0x1312 ||
		    adev->pdev->device == 0x1316 ||
		    adev->pdev->device == 0x1317)
			adev->external_rev_id = 0x41;
		else
			adev->external_rev_id = 0x1;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->cg_flags =
			AMDGPU_CG_SUPPORT_GFX_MGCG |
			AMDGPU_CG_SUPPORT_GFX_MGLS |
			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
			AMDGPU_CG_SUPPORT_GFX_CGLS |
			AMDGPU_CG_SUPPORT_GFX_CGTS |
			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
			AMDGPU_CG_SUPPORT_GFX_CP_LS |
			AMDGPU_CG_SUPPORT_SDMA_MGCG |
			AMDGPU_CG_SUPPORT_SDMA_LS |
			AMDGPU_CG_SUPPORT_BIF_LS |
			AMDGPU_CG_SUPPORT_VCE_MGCG |
			AMDGPU_CG_SUPPORT_UVD_MGCG |
			AMDGPU_CG_SUPPORT_HDP_LS |
			AMDGPU_CG_SUPPORT_HDP_MGCG;
		adev->pg_flags =
			/*AMDGPU_PG_SUPPORT_GFX_PG |
			  AMDGPU_PG_SUPPORT_GFX_SMG | */
			AMDGPU_PG_SUPPORT_UVD |
			/*AMDGPU_PG_SUPPORT_VCE |
			  AMDGPU_PG_SUPPORT_CP |
			  AMDGPU_PG_SUPPORT_GDS |
			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
			  AMDGPU_PG_SUPPORT_SAMU |*/
			0;
		/* Kabini external rev depends on the silicon rev; Mullins is linear */
		if (adev->asic_type == CHIP_KABINI) {
			if (adev->rev_id == 0)
				adev->external_rev_id = 0x81;
			else if (adev->rev_id == 1)
				adev->external_rev_id = 0x82;
			else if (adev->rev_id == 2)
				adev->external_rev_id = 0x85;
		} else
			adev->external_rev_id = adev->rev_id + 0xa1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	amdgpu_get_pcie_info(adev);

	return 0;
}
/* No software state to set up for the common block; always succeeds. */
static int cik_common_sw_init(void *handle)
{
	return 0;
}
/* No software state to tear down for the common block; always succeeds. */
static int cik_common_sw_fini(void *handle)
{
	return 0;
}
/*
 * cik_common_hw_init - hw init for the common block
 * @handle: amdgpu_device pointer
 *
 * Loads the golden register settings, then retrains the PCIe link and
 * programs ASPM.  Order matters: golden registers come first.
 */
static int cik_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	cik_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	cik_pcie_gen3_enable(adev);
	/* enable aspm */
	cik_program_aspm(adev);

	return 0;
}
/* Nothing to undo from hw_init (register programming only); always 0. */
static int cik_common_hw_fini(void *handle)
{
	return 0;
}
/* Suspend: quiesce amdkfd first, then run the (no-op) hw_fini path. */
static int cik_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_amdkfd_suspend(adev);

	return cik_common_hw_fini(adev);
}
  2233. static int cik_common_resume(void *handle)
  2234. {
  2235. int r;
  2236. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2237. r = cik_common_hw_init(adev);
  2238. if (r)
  2239. return r;
  2240. return amdgpu_amdkfd_resume(adev);
  2241. }
/* The common block has no busy state to report; always idle. */
static bool cik_common_is_idle(void *handle)
{
	return true;
}
/* Nothing to wait on (always idle); returns success immediately. */
static int cik_common_wait_for_idle(void *handle)
{
	return 0;
}
/* No per-block status registers to dump for the common block. */
static void cik_common_print_status(void *handle)
{
}
/* Soft reset stub for the common block. */
static int cik_common_soft_reset(void *handle)
{
	/* XXX hard reset?? */
	return 0;
}
/* Clockgating is managed per-IP elsewhere; nothing to do here. */
static int cik_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	return 0;
}
/* Powergating is managed per-IP elsewhere; nothing to do here. */
static int cik_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}
/* IP-block callback table for the CIK "common" block (referenced by the
 * per-ASIC ip_blocks arrays above).
 */
const struct amd_ip_funcs cik_common_ip_funcs = {
	.early_init = cik_common_early_init,
	.late_init = NULL,
	.sw_init = cik_common_sw_init,
	.sw_fini = cik_common_sw_fini,
	.hw_init = cik_common_hw_init,
	.hw_fini = cik_common_hw_fini,
	.suspend = cik_common_suspend,
	.resume = cik_common_resume,
	.is_idle = cik_common_is_idle,
	.wait_for_idle = cik_common_wait_for_idle,
	.soft_reset = cik_common_soft_reset,
	.print_status = cik_common_print_status,
	.set_clockgating_state = cik_common_set_clockgating_state,
	.set_powergating_state = cik_common_set_powergating_state,
};