amdgpu_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_rreg(adev, reg);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
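
/*
 * Illustrative note (editor's sketch, not part of the original file):
 * drivers rarely call amdgpu_mm_rreg()/amdgpu_mm_wreg() directly; the
 * RREG32()/WREG32() macros in amdgpu.h wrap them, so a typical
 * read-modify-write looks like:
 *
 *	u32 tmp = RREG32(mmSOME_REG);	// mmSOME_REG: hypothetical offset
 *	tmp |= SOME_ENABLE_MASK;	// hypothetical bit mask
 *	WREG32(mmSOME_REG, tmp);
 *
 * Offsets past rmmio_size fall back to the MM_INDEX/MM_DATA indexed pair
 * under mmio_idx_lock, as implemented above.
 */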
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
		BUG_ON(in_interrupt());
		return amdgpu_virt_kiq_wreg(adev, reg, v);
	}

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
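
/*
 * Illustrative usage note (editor's sketch, not from this file): ring code
 * typically does not call these helpers directly but goes through the
 * WDOORBELL32()/WDOORBELL64() convenience macros in amdgpu.h to kick the
 * hardware after updating the write pointer, e.g.
 *
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 */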
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
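
/*
 * Example sketch (editor's illustration, not from this file): golden-register
 * tables are flat {offset, and_mask, or_mask} triplets.  and_mask selects the
 * bits to clear before or_mask is OR'ed in; an and_mask of 0xffffffff means
 * the register is overwritten without being read.  A hypothetical table:
 *
 *	static const u32 golden_settings_example[] = {
 *		0x263e, 0xffffffff, 0x02010001,	// full overwrite, no read
 *		0x2268, 0x0000ffff, 0x00000100,	// clear low 16 bits, OR in value
 *	};
 *
 *	amdgpu_program_register_sequence(adev, golden_settings_example,
 *					 ARRAY_SIZE(golden_settings_example));
 */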
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}
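
/*
 * Usage sketch (editor's illustration, not from this file): a ring typically
 * grabs a writeback slot and derives matching CPU and GPU addresses from it:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];	// CPU view
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;	// GPU view
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */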
/**
 * amdgpu_wb_get_64bit - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a 64bit wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 2, 7, 0);

	if ((offset + 1) < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		__set_bit(offset + 1, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb)
{
	int i = 0;
	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
				adev->wb.num_wb, 0, 8, 63, 0);

	if ((offset + 7) < adev->wb.num_wb) {
		for (i = 0; i < 8; i++)
			__set_bit(offset + i, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

/**
 * amdgpu_wb_free_64bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
{
	if ((wb + 1) < adev->wb.num_wb) {
		__clear_bit(wb, adev->wb.used);
		__clear_bit(wb + 1, adev->wb.used);
	}
}

/**
 * amdgpu_wb_free_256bit - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb)
{
	int i = 0;

	if ((wb + 7) < adev->wb.num_wb)
		for (i = 0; i < 8; i++)
			__clear_bit(wb + i, adev->wb.used);
}

/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		mc->gart_start = mc->vram_end + 1;
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
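
/*
 * Worked example (editor's illustration with made-up numbers): with a 40-bit
 * mc_mask (0xff_ffff_ffff), vram_start = 0 and mc_vram_size = 8 GB,
 * vram_end = 0x1_ffff_ffff.  Then size_bf = vram_start = 0 while
 * size_af = mc_mask - vram_end, so the "after" gap wins and GTT lands
 * directly behind VRAM at gart_start = vram_end + 1 = 0x2_0000_0000.
 */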
/*
 * GPU helper functions.
 */
/**
 * amdgpu_need_post - check if the hw needs to be posted or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if a post is needed, false if not.
 */
bool amdgpu_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case,
		 * some old smc fw still needs the driver to do a vPost after a
		 * VM reboot, otherwise the gpu hangs.  smc fw versions above
		 * 22.15 don't have this flaw, so we force a vPost only for smc
		 * versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}
	return amdgpu_need_post(adev);
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */
/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_block_size = -1;
}

static void amdgpu_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (!is_power_of_2(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	/*
	 * Max GPUVM size for Cayman, SI, CI and VI are 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		goto def_value;
	}

	return;

def_value:
	amdgpu_vm_size = -1;
}
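
/*
 * Worked example (editor's illustration): pages are 4 KB, so the low 12 bits
 * address the byte within a page.  With amdgpu_vm_block_size = 9, one
 * page-directory entry covers 2^(9 + 12) = 2 MB, and an amdgpu_vm_size of
 * 64 (GB, i.e. 2^36 bytes of address space) leaves 36 - 12 - 9 = 15 bits of
 * page-directory index.  The checks above reject block sizes that are too
 * small, too large, or inconsistent with the chosen VM size.
 */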
  1038. /**
  1039. * amdgpu_check_arguments - validate module params
  1040. *
  1041. * @adev: amdgpu_device pointer
  1042. *
  1043. * Validates certain module parameters and updates
  1044. * the associated values used by the driver (all asics).
  1045. */
  1046. static void amdgpu_check_arguments(struct amdgpu_device *adev)
  1047. {
  1048. if (amdgpu_sched_jobs < 4) {
  1049. dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
  1050. amdgpu_sched_jobs);
  1051. amdgpu_sched_jobs = 4;
  1052. } else if (!is_power_of_2(amdgpu_sched_jobs)){
  1053. dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
  1054. amdgpu_sched_jobs);
  1055. amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
  1056. }
  1057. if (amdgpu_gart_size < 32) {
  1058. /* gart size must be greater or equal to 32M */
  1059. dev_warn(adev->dev, "gart size (%d) too small\n",
  1060. amdgpu_gart_size);
  1061. amdgpu_gart_size = 32;
  1062. }
  1063. if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
  1064. /* gtt size must be greater or equal to 32M */
  1065. dev_warn(adev->dev, "gtt size (%d) too small\n",
  1066. amdgpu_gtt_size);
  1067. amdgpu_gtt_size = -1;
  1068. }
  1069. amdgpu_check_vm_size(adev);
  1070. amdgpu_check_block_size(adev);
  1071. if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
  1072. !is_power_of_2(amdgpu_vram_page_split))) {
  1073. dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
  1074. amdgpu_vram_page_split);
  1075. amdgpu_vram_page_split = 1024;
  1076. }
  1077. }
  1078. /**
  1079. * amdgpu_switcheroo_set_state - set switcheroo state
  1080. *
  1081. * @pdev: pci dev pointer
  1082. * @state: vga_switcheroo state
  1083. *
  1084. * Callback for the switcheroo driver. Suspends or resumes the
  1085. * the asics before or after it is powered up using ACPI methods.
  1086. */
  1087. static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
  1088. {
  1089. struct drm_device *dev = pci_get_drvdata(pdev);
  1090. if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
  1091. return;
  1092. if (state == VGA_SWITCHEROO_ON) {
  1093. pr_info("amdgpu: switched on\n");
  1094. /* don't suspend or resume card normally */
  1095. dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
  1096. amdgpu_device_resume(dev, true, true);
  1097. dev->switch_power_state = DRM_SWITCH_POWER_ON;
  1098. drm_kms_helper_poll_enable(dev);
  1099. } else {
  1100. pr_info("amdgpu: switched off\n");
  1101. drm_kms_helper_poll_disable(dev);
  1102. dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
  1103. amdgpu_device_suspend(dev, true, true);
  1104. dev->switch_power_state = DRM_SWITCH_POWER_OFF;
  1105. }
  1106. }
  1107. /**
  1108. * amdgpu_switcheroo_can_switch - see if switcheroo state can change
  1109. *
  1110. * @pdev: pci dev pointer
  1111. *
  1112. * Callback for the switcheroo driver. Check of the switcheroo
  1113. * state can be changed.
  1114. * Returns true if the state can be changed, false if not.
  1115. */
  1116. static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
  1117. {
  1118. struct drm_device *dev = pci_get_drvdata(pdev);
  1119. /*
  1120. * FIXME: open_count is protected by drm_global_mutex but that would lead to
  1121. * locking inversion with the driver load path. And the access here is
  1122. * completely racy anyway. So don't bother with locking for now.
  1123. */
  1124. return dev->open_count == 0;
  1125. }
  1126. static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
  1127. .set_gpu_state = amdgpu_switcheroo_set_state,
  1128. .reprobe = NULL,
  1129. .can_switch = amdgpu_switcheroo_can_switch,
  1130. };
  1131. int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
  1132. enum amd_ip_block_type block_type,
  1133. enum amd_clockgating_state state)
  1134. {
  1135. int i, r = 0;
  1136. for (i = 0; i < adev->num_ip_blocks; i++) {
  1137. if (!adev->ip_blocks[i].status.valid)
  1138. continue;
  1139. if (adev->ip_blocks[i].version->type != block_type)
  1140. continue;
  1141. if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
  1142. continue;
  1143. r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
  1144. (void *)adev, state);
  1145. if (r)
  1146. DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
  1147. adev->ip_blocks[i].version->funcs->name, r);
  1148. }
  1149. return r;
  1150. }
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is equal to or greater than
 * @major.@minor, and 1 if it is smaller or the IP block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
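
/*
 * Illustrative sketch (editorial): an IP block driver can gate a code path
 * on a minimum IP version, e.g. only take the newer programming sequence
 * when the GFX block is at least version 8.1:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 1) == 0) {
 *		// GFX 8.1 or newer is present
 *	}
 */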
/**
 * amdgpu_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
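
/*
 * Illustrative sketch (editorial; block names are hypothetical): the
 * per-ASIC *_set_ip_blocks() helpers called from amdgpu_early_init() below
 * are expected to register blocks in initialization order, roughly:
 *
 *	r = amdgpu_ip_block_add(adev, &some_common_ip_block);
 *	if (r)
 *		return r;
 *	r = amdgpu_ip_block_add(adev, &some_gmc_ip_block);
 *	if (r)
 *		return r;
 */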
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
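
/*
 * Note (editorial): the "reset magic" works by snapshotting the first
 * AMDGPU_RESET_MAGIC_NUM bytes of the GART-mapped scratch page into
 * adev->reset_magic at late-init time; after a GPU reset, memcmp() against
 * the same location tells us whether VRAM contents survived.
 * amdgpu_gpu_reset() below uses this to decide whether shadow-buffer
 * recovery is needed.
 */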
static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}
static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_fill_reset_magic(adev);

	return 0;
}
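
/*
 * Note (editorial): rather than gating clocks synchronously, late init
 * schedules late_init_work with an AMDGPU_RESUME_MS delay; the handler
 * (amdgpu_late_init_func_handler() below) then calls
 * amdgpu_late_set_cg_state(), which keeps the init/resume path fast.
 */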
static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
		amdgpu_virt_release_full_gpu(adev, false);
	}

	return 0;
}
static void amdgpu_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_late_set_cg_state(adev);
}
int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}
static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}

static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}
static int amdgpu_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}

static int amdgpu_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}

static int amdgpu_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_resume_phase2(adev);

	return r;
}
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (adev->is_atom_fw) {
		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	} else {
		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
	}
}
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

	amdgpu_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->gtt_list);
	spin_lock_init(&adev->gtt_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL)
		return -ENOMEM;

	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	if (adev->asic_type >= CHIP_BONAIRE)
		/* doorbell bar mapping */
		amdgpu_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);

	if (amdgpu_runtime_pm == 1)
		runtime = true;
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_vpost_needed(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
			goto failed;
		}
	} else {
		DRM_INFO("GPU post is not needed\n");
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	amdgpu_fbdev_init(adev);

	r = amdgpu_gem_debugfs_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_test_ib_ring_init(adev);
	if (r)
		DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_late_init failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);
	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->asic_type >= CHIP_BONAIRE)
		amdgpu_doorbell_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}
/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the hw into a low power state (D3hot), false to reset it
 * @fbcon: suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL)
		return -ENODEV;

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL)
			continue;

		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	amdgpu_atombios_scratch_regs_save(adev);
	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}
/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device and restore its state
 * @fbcon: resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}
	amdgpu_atombios_scratch_regs_restore(adev);

	/* post card */
	if (amdgpu_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	if (resume) {
		r = amdgpu_ib_ring_tests(adev);
		if (r)
			DRM_ERROR("ib ring test failed (%d).\n", r);
	}

	r = amdgpu_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}
static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block needs a full reset!\n");
				return true;
			}
		}
	}
	return false;
}
static int amdgpu_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}
bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	return amdgpu_lockup_timeout > 0;
}
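
/*
 * Note (editorial): shadow-buffer backup only applies to dGPUs (an APU's
 * "VRAM" is carved out of system memory and is expected to survive a GPU
 * reset), and it only makes sense when a lockup timeout is configured,
 * i.e. when a hang can actually trigger a reset.
 */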
static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
					   struct amdgpu_ring *ring,
					   struct amdgpu_bo *bo,
					   struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
		if (r) {
			DRM_ERROR("%p bind failed\n", bo->shadow);
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}
/**
 * amdgpu_sriov_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 * @job: the job that triggered the hang, or NULL
 *
 * Attempt to reset the GPU if it has hung (all asics),
 * for the SR-IOV case.
 * Returns 0 for success or an error on failure.
 */
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
{
	int i, j, r = 0;
	int resched;
	struct amdgpu_bo *bo, *tmp;
	struct amdgpu_ring *ring;
	struct dma_fence *fence = NULL, *next = NULL;

	mutex_lock(&adev->virt.lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->gfx.in_reset = true;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* we start from the ring that triggered the GPU hang */
	j = job ? job->ring->idx : 0;

	/* block scheduler */
	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];

		if (!ring || !ring->sched.thread)
			continue;

		kthread_park(ring->sched.thread);

		if (job && j != i)
			continue;

		/* here give the last chance to check if the job was removed
		 * from the mirror list, since we already paid some time on
		 * kthread_park */
		if (job && list_empty(&job->base.node)) {
			kthread_unpark(ring->sched.thread);
			goto give_up_reset;
		}

		if (job && amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
			amd_sched_job_kickout(&job->base);

		/* only do job_reset on the hang ring if @job not NULL */
		amd_sched_hw_job_reset(&ring->sched);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion_ring(ring);
	}

	/* request to take full control of GPU before re-initialization */
	if (job)
		amdgpu_virt_reset_gpu(adev);
	else
		amdgpu_virt_request_full_gpu(adev, true);

	/* Resume IP prior to SMC */
	amdgpu_sriov_reinit_early(adev);

	/* we need to recover gart prior to running SMC/CP/SDMA resume */
	amdgpu_ttm_recover_gart(adev);

	/* now we are okay to resume SMC/CP/SDMA */
	amdgpu_sriov_reinit_late(adev);

	amdgpu_irq_gpu_reset_resume_helper(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	DRM_INFO("recover vram bo from shadow\n");

	ring = adev->mman.buffer_funcs_ring;
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
		next = NULL;
		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
		if (fence) {
			r = dma_fence_wait(fence, false);
			if (r) {
				WARN(r, "recovery from shadow isn't completed\n");
				break;
			}
		}

		dma_fence_put(fence);
		fence = next;
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence) {
		r = dma_fence_wait(fence, false);
		if (r)
			WARN(r, "recovery from shadow isn't completed\n");
	}
	dma_fence_put(fence);

	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
		ring = adev->rings[i % AMDGPU_MAX_RINGS];
		if (!ring || !ring->sched.thread)
			continue;

		if (job && j != i) {
			kthread_unpark(ring->sched.thread);
			continue;
		}

		amd_sched_job_recovery(&ring->sched);
		kthread_unpark(ring->sched.thread);
	}

	drm_helper_resume_force_mode(adev->ddev);
give_up_reset:
	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	adev->gfx.in_reset = false;
	mutex_unlock(&adev->virt.lock_reset);
	return r;
}
/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
	int i, r;
	int resched;
	bool need_full_reset, vram_lost = false;

	if (!amdgpu_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
		amd_sched_hw_job_reset(&ring->sched);
	}
	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
	amdgpu_fence_driver_force_completion(adev);

	need_full_reset = amdgpu_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_pre_soft_reset(adev);
		r = amdgpu_soft_reset(adev);
		amdgpu_post_soft_reset(adev);
		if (r || amdgpu_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_suspend(adev);

retry:
		amdgpu_atombios_scratch_regs_save(adev);
		r = amdgpu_asic_reset(adev);
		amdgpu_atombios_scratch_regs_restore(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_resume_phase1(adev);
			if (r)
				goto out;
			vram_lost = amdgpu_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}
			r = amdgpu_ttm_recover_gart(adev);
			if (r)
				goto out;
			r = amdgpu_resume_phase2(adev);
			if (r)
				goto out;
			if (vram_lost)
				amdgpu_fill_reset_magic(adev);
		}
	}
out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
		/*
		 * recover VM page tables, since we cannot rely on VRAM being
		 * consistent after a full GPU reset.
		 */
		if (need_full_reset && amdgpu_need_backup(adev)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			amd_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		dev_err(adev->dev, "asic resume failed (%d).\n", r);
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			if (adev->rings[i] && adev->rings[i]->sched.thread) {
				kthread_unpark(adev->rings[i]->sched.thread);
			}
		}
	}

	drm_helper_resume_force_mode(adev->ddev);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset failed\n");
		amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset succeeded!\n");
	}

	amdgpu_vf_error_trans_all(adev);
	return r;
}
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		}
	}
}
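
/*
 * Illustrative note (editorial): amdgpu_pcie_gen_cap and
 * amdgpu_pcie_lane_cap above are backed by module parameters, so the probed
 * masks can be overridden at load time; assuming the parameter names
 * exported by amdgpu_drv.c, something along the lines of:
 *
 *	modprobe amdgpu pcie_gen_cap=<mask> pcie_lane_cap=<mask>
 */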
/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}
  2780. #if defined(CONFIG_DEBUG_FS)
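
/*
 * The amdgpu_regs file below overloads the file offset (*pos) as a small
 * command word.  A sketch of the layout, mirroring the decode in the
 * read/write handlers (not an authoritative spec):
 *
 *   bits  0..21  byte offset into the MMIO register BAR (dword aligned)
 *   bit  23      take adev->pm.mutex around the access (power-gated regs)
 *   bits 24..33  SE index        (0x3FF selects all/broadcast)
 *   bits 34..43  SH index        (0x3FF selects all/broadcast)
 *   bits 44..53  instance index  (0x3FF selects all/broadcast)
 *   bit  62      apply the SE/SH/instance bank selection above
 */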
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		/* valid dword offsets end one register before rmmio_size */
		if (*pos >= adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	/*
	 * Bail out through "end" rather than returning directly: returning
	 * from inside this loop would leak grbm_idx_mutex/pm.mutex.
	 */
	while (size) {
		uint32_t value;

		if (*pos >= adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
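
/*
 * The PCIE, DIDT and SMC files below use the same dword-at-a-time
 * convention as amdgpu_regs but without any bank-select encoding:
 * *pos is a plain byte offset into the respective indirect register
 * space.  Note that the PCIE and DIDT accessors take a dword index
 * (*pos >> 2) while the SMC accessors take the byte offset directly.
 */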
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
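
/*
 * amdgpu_gca_config dumps a versioned array of dwords describing the
 * GFX configuration.  The first dword is a layout version; fields are
 * only ever appended (with the version bumped), so userspace written
 * against an older layout keeps working.
 */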
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
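
/*
 * amdgpu_sensors: the file offset selects which sensor to sample
 * (idx = *pos >> 2); the index is passed straight through to the
 * powerplay read_sensor() callback, so the values follow whatever
 * sensor enumeration that backend implements.
 */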
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
	else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
		r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
						&valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
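
/*
 * amdgpu_wave packs a wave selector into the file offset.  Sketch of
 * the fields, mirroring the decode below: bits 0..6 hold the dword
 * offset into the returned wave data, with the SE, SH, CU, wave and
 * SIMD indices following in the higher bits.
 */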
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
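
/*
 * amdgpu_gpr reads the general-purpose registers of one wave.  Bit 60
 * of the file offset selects the bank: 0 reads VGPRs (which also need
 * a thread index), 1 reads SGPRs, as handled below.
 */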
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);	/* in dwords */
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
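
/*
 * Usage sketch (the minor number under /sys/kernel/debug/dri/ depends on
 * the system): reading the dword register at byte offset 0x8010 (a purely
 * illustrative offset) through the amdgpu_regs file registered below.
 * dd's skip= counts bs-sized blocks, so skip = offset / 4 positions *pos
 * at the requested register:
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 \
 *      skip=$((0x8010 / 4)) 2>/dev/null | xxd
 */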
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the entries created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		/* the first entry is amdgpu_regs: report the BAR size */
		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* park the scheduler threads so no new jobs race the test */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* restart the scheduler threads */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}
static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
};

static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev,
					amdgpu_debugfs_test_ib_ring_list, 1);
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
	return 0;
}
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif