gfx_v8_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"

#define GFX8_NUM_GFX_RINGS 1
#define GFX8_NUM_COMPUTE_RINGS 8

#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003

#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x) ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x) ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x) ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK 0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK 0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK 0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK 0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK 0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK 0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD 1
#define CLE_BPM_SERDES_CMD 0

/* BPM Register Address */
enum {
	BPM_REG_CGLS_EN = 0, /* Enable/Disable CGLS */
	BPM_REG_CGLS_ON, /* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE, /* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE, /* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE, /* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

#define RLC_FormatDirectRegListLength 14

MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
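
/*
 * Per-VMID GDS register map: the GDS base/size registers plus the matching
 * GWS and OA registers for VMIDs 0-15.
 */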
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
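
/*
 * "Golden" register tables: each entry is a triple of register offset,
 * AND mask and OR value, applied in order by amdgpu_program_register_sequence()
 * during gfx_v8_0_init_golden_registers().
 */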
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
};

static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};

static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};

static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};

static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
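
/* Forward declarations for helpers implemented later in this file. */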
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
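
/*
 * Program the per-ASIC "golden" register settings (clock-gating defaults,
 * raster/addressing config and assorted workarounds) selected by
 * adev->asic_type.
 */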
  611. static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
  612. {
  613. switch (adev->asic_type) {
  614. case CHIP_TOPAZ:
  615. amdgpu_program_register_sequence(adev,
  616. iceland_mgcg_cgcg_init,
  617. (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
  618. amdgpu_program_register_sequence(adev,
  619. golden_settings_iceland_a11,
  620. (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
  621. amdgpu_program_register_sequence(adev,
  622. iceland_golden_common_all,
  623. (const u32)ARRAY_SIZE(iceland_golden_common_all));
  624. break;
  625. case CHIP_FIJI:
  626. amdgpu_program_register_sequence(adev,
  627. fiji_mgcg_cgcg_init,
  628. (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
  629. amdgpu_program_register_sequence(adev,
  630. golden_settings_fiji_a10,
  631. (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
  632. amdgpu_program_register_sequence(adev,
  633. fiji_golden_common_all,
  634. (const u32)ARRAY_SIZE(fiji_golden_common_all));
  635. break;
  636. case CHIP_TONGA:
  637. amdgpu_program_register_sequence(adev,
  638. tonga_mgcg_cgcg_init,
  639. (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
  640. amdgpu_program_register_sequence(adev,
  641. golden_settings_tonga_a11,
  642. (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
  643. amdgpu_program_register_sequence(adev,
  644. tonga_golden_common_all,
  645. (const u32)ARRAY_SIZE(tonga_golden_common_all));
  646. break;
  647. case CHIP_POLARIS11:
  648. amdgpu_program_register_sequence(adev,
  649. golden_settings_polaris11_a11,
  650. (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
  651. amdgpu_program_register_sequence(adev,
  652. polaris11_golden_common_all,
  653. (const u32)ARRAY_SIZE(polaris11_golden_common_all));
  654. break;
  655. case CHIP_POLARIS10:
  656. amdgpu_program_register_sequence(adev,
  657. golden_settings_polaris10_a11,
  658. (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
  659. amdgpu_program_register_sequence(adev,
  660. polaris10_golden_common_all,
  661. (const u32)ARRAY_SIZE(polaris10_golden_common_all));
  662. WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
  663. if (adev->pdev->revision == 0xc7 &&
  664. ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
  665. (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
  666. (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
  667. amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
  668. amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
  669. }
  670. break;
  671. case CHIP_CARRIZO:
  672. amdgpu_program_register_sequence(adev,
  673. cz_mgcg_cgcg_init,
  674. (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
  675. amdgpu_program_register_sequence(adev,
  676. cz_golden_settings_a11,
  677. (const u32)ARRAY_SIZE(cz_golden_settings_a11));
  678. amdgpu_program_register_sequence(adev,
  679. cz_golden_common_all,
  680. (const u32)ARRAY_SIZE(cz_golden_common_all));
  681. break;
  682. case CHIP_STONEY:
  683. amdgpu_program_register_sequence(adev,
  684. stoney_mgcg_cgcg_init,
  685. (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
  686. amdgpu_program_register_sequence(adev,
  687. stoney_golden_settings_a11,
  688. (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
  689. amdgpu_program_register_sequence(adev,
  690. stoney_golden_common_all,
  691. (const u32)ARRAY_SIZE(stoney_golden_common_all));
  692. break;
  693. default:
  694. break;
  695. }
  696. }
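/*
 * Set aside a small pool of SCRATCH_REG* registers. The ring and IB tests
 * below allocate one of these, have the CP write a magic value into it and
 * read it back to confirm that command submission works.
 */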
  697. static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
  698. {
  699. int i;
  700. adev->gfx.scratch.num_reg = 7;
  701. adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
  702. for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
  703. adev->gfx.scratch.free[i] = true;
  704. adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
  705. }
  706. }
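/*
 * Ring sanity test: seed a scratch register with 0xCAFEDEAD from the CPU,
 * then emit a SET_UCONFIG_REG packet that writes 0xDEADBEEF to the same
 * register from the ring, and poll until the new value appears (or the
 * usec timeout expires).
 */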
  707. static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
  708. {
  709. struct amdgpu_device *adev = ring->adev;
  710. uint32_t scratch;
  711. uint32_t tmp = 0;
  712. unsigned i;
  713. int r;
  714. r = amdgpu_gfx_scratch_get(adev, &scratch);
  715. if (r) {
  716. DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
  717. return r;
  718. }
  719. WREG32(scratch, 0xCAFEDEAD);
  720. r = amdgpu_ring_alloc(ring, 3);
  721. if (r) {
  722. DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
  723. ring->idx, r);
  724. amdgpu_gfx_scratch_free(adev, scratch);
  725. return r;
  726. }
  727. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  728. amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
  729. amdgpu_ring_write(ring, 0xDEADBEEF);
  730. amdgpu_ring_commit(ring);
  731. for (i = 0; i < adev->usec_timeout; i++) {
  732. tmp = RREG32(scratch);
  733. if (tmp == 0xDEADBEEF)
  734. break;
  735. DRM_UDELAY(1);
  736. }
  737. if (i < adev->usec_timeout) {
  738. DRM_INFO("ring test on %d succeeded in %d usecs\n",
  739. ring->idx, i);
  740. } else {
  741. DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
  742. ring->idx, scratch, tmp);
  743. r = -EINVAL;
  744. }
  745. amdgpu_gfx_scratch_free(adev, scratch);
  746. return r;
  747. }
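/*
 * IB sanity test: same scratch-register trick as above, but the write is
 * carried in an indirect buffer and completion is detected by waiting on
 * the returned fence before reading the register back.
 */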
  748. static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
  749. {
  750. struct amdgpu_device *adev = ring->adev;
  751. struct amdgpu_ib ib;
  752. struct fence *f = NULL;
  753. uint32_t scratch;
  754. uint32_t tmp = 0;
  755. long r;
  756. r = amdgpu_gfx_scratch_get(adev, &scratch);
  757. if (r) {
  758. DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
  759. return r;
  760. }
  761. WREG32(scratch, 0xCAFEDEAD);
  762. memset(&ib, 0, sizeof(ib));
  763. r = amdgpu_ib_get(adev, NULL, 256, &ib);
  764. if (r) {
  765. DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
  766. goto err1;
  767. }
  768. ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
  769. ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
  770. ib.ptr[2] = 0xDEADBEEF;
  771. ib.length_dw = 3;
  772. r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
  773. if (r)
  774. goto err2;
  775. r = fence_wait_timeout(f, false, timeout);
  776. if (r == 0) {
  777. DRM_ERROR("amdgpu: IB test timed out.\n");
  778. r = -ETIMEDOUT;
  779. goto err2;
  780. } else if (r < 0) {
  781. DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
  782. goto err2;
  783. }
  784. tmp = RREG32(scratch);
  785. if (tmp == 0xDEADBEEF) {
  786. DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
  787. r = 0;
  788. } else {
  789. DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
  790. scratch, tmp);
  791. r = -EINVAL;
  792. }
  793. err2:
  794. amdgpu_ib_free(adev, &ib, NULL);
  795. fence_put(f);
  796. err1:
  797. amdgpu_gfx_scratch_free(adev, scratch);
  798. return r;
  799. }
800. static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
  801. release_firmware(adev->gfx.pfp_fw);
  802. adev->gfx.pfp_fw = NULL;
  803. release_firmware(adev->gfx.me_fw);
  804. adev->gfx.me_fw = NULL;
  805. release_firmware(adev->gfx.ce_fw);
  806. adev->gfx.ce_fw = NULL;
  807. release_firmware(adev->gfx.rlc_fw);
  808. adev->gfx.rlc_fw = NULL;
  809. release_firmware(adev->gfx.mec_fw);
  810. adev->gfx.mec_fw = NULL;
  811. if ((adev->asic_type != CHIP_STONEY) &&
  812. (adev->asic_type != CHIP_TOPAZ))
  813. release_firmware(adev->gfx.mec2_fw);
  814. adev->gfx.mec2_fw = NULL;
  815. kfree(adev->gfx.rlc.register_list_format);
  816. }
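/*
 * Request and validate the CP (PFP/ME/CE/MEC/MEC2) and RLC firmware images
 * for the detected ASIC, cache their version/feature numbers and the RLC
 * register restore lists, and, when the SMU loads firmware, register each
 * image in adev->firmware.ucode[] and grow fw_size by its page-aligned size.
 */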
  817. static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
  818. {
  819. const char *chip_name;
  820. char fw_name[30];
  821. int err;
  822. struct amdgpu_firmware_info *info = NULL;
  823. const struct common_firmware_header *header = NULL;
  824. const struct gfx_firmware_header_v1_0 *cp_hdr;
  825. const struct rlc_firmware_header_v2_0 *rlc_hdr;
  826. unsigned int *tmp = NULL, i;
  827. DRM_DEBUG("\n");
  828. switch (adev->asic_type) {
  829. case CHIP_TOPAZ:
  830. chip_name = "topaz";
  831. break;
  832. case CHIP_TONGA:
  833. chip_name = "tonga";
  834. break;
  835. case CHIP_CARRIZO:
  836. chip_name = "carrizo";
  837. break;
  838. case CHIP_FIJI:
  839. chip_name = "fiji";
  840. break;
  841. case CHIP_POLARIS11:
  842. chip_name = "polaris11";
  843. break;
  844. case CHIP_POLARIS10:
  845. chip_name = "polaris10";
  846. break;
  847. case CHIP_STONEY:
  848. chip_name = "stoney";
  849. break;
  850. default:
  851. BUG();
  852. }
  853. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
  854. err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
  855. if (err)
  856. goto out;
  857. err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
  858. if (err)
  859. goto out;
  860. cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
  861. adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
  862. adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
  863. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
  864. err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
  865. if (err)
  866. goto out;
  867. err = amdgpu_ucode_validate(adev->gfx.me_fw);
  868. if (err)
  869. goto out;
  870. cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
  871. adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
  872. adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
  873. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
  874. err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
  875. if (err)
  876. goto out;
  877. err = amdgpu_ucode_validate(adev->gfx.ce_fw);
  878. if (err)
  879. goto out;
  880. cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
  881. adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
  882. adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
  883. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
  884. err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
  885. if (err)
  886. goto out;
887. err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
if (err)
goto out;
  888. rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
  889. adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
  890. adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
  891. adev->gfx.rlc.save_and_restore_offset =
  892. le32_to_cpu(rlc_hdr->save_and_restore_offset);
  893. adev->gfx.rlc.clear_state_descriptor_offset =
  894. le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
  895. adev->gfx.rlc.avail_scratch_ram_locations =
  896. le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
  897. adev->gfx.rlc.reg_restore_list_size =
  898. le32_to_cpu(rlc_hdr->reg_restore_list_size);
  899. adev->gfx.rlc.reg_list_format_start =
  900. le32_to_cpu(rlc_hdr->reg_list_format_start);
  901. adev->gfx.rlc.reg_list_format_separate_start =
  902. le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
  903. adev->gfx.rlc.starting_offsets_start =
  904. le32_to_cpu(rlc_hdr->starting_offsets_start);
  905. adev->gfx.rlc.reg_list_format_size_bytes =
  906. le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
  907. adev->gfx.rlc.reg_list_size_bytes =
  908. le32_to_cpu(rlc_hdr->reg_list_size_bytes);
  909. adev->gfx.rlc.register_list_format =
  910. kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
  911. adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
  912. if (!adev->gfx.rlc.register_list_format) {
  913. err = -ENOMEM;
  914. goto out;
  915. }
  916. tmp = (unsigned int *)((uintptr_t)rlc_hdr +
  917. le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
918. for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
  919. adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
  920. adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
  921. tmp = (unsigned int *)((uintptr_t)rlc_hdr +
  922. le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
923. for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
  924. adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
  925. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
  926. err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
  927. if (err)
  928. goto out;
  929. err = amdgpu_ucode_validate(adev->gfx.mec_fw);
  930. if (err)
  931. goto out;
  932. cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  933. adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
  934. adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
  935. if ((adev->asic_type != CHIP_STONEY) &&
  936. (adev->asic_type != CHIP_TOPAZ)) {
  937. snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
  938. err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
  939. if (!err) {
  940. err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
  941. if (err)
  942. goto out;
  943. cp_hdr = (const struct gfx_firmware_header_v1_0 *)
  944. adev->gfx.mec2_fw->data;
  945. adev->gfx.mec2_fw_version =
  946. le32_to_cpu(cp_hdr->header.ucode_version);
  947. adev->gfx.mec2_feature_version =
  948. le32_to_cpu(cp_hdr->ucode_feature_version);
  949. } else {
  950. err = 0;
  951. adev->gfx.mec2_fw = NULL;
  952. }
  953. }
  954. if (adev->firmware.smu_load) {
  955. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
  956. info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
  957. info->fw = adev->gfx.pfp_fw;
  958. header = (const struct common_firmware_header *)info->fw->data;
  959. adev->firmware.fw_size +=
  960. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  961. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
  962. info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
  963. info->fw = adev->gfx.me_fw;
  964. header = (const struct common_firmware_header *)info->fw->data;
  965. adev->firmware.fw_size +=
  966. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  967. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
  968. info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
  969. info->fw = adev->gfx.ce_fw;
  970. header = (const struct common_firmware_header *)info->fw->data;
  971. adev->firmware.fw_size +=
  972. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  973. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
  974. info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
  975. info->fw = adev->gfx.rlc_fw;
  976. header = (const struct common_firmware_header *)info->fw->data;
  977. adev->firmware.fw_size +=
  978. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  979. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
  980. info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
  981. info->fw = adev->gfx.mec_fw;
  982. header = (const struct common_firmware_header *)info->fw->data;
  983. adev->firmware.fw_size +=
  984. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
985. /* we also need to account for the MEC jump table (JT) */
  986. cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  987. adev->firmware.fw_size +=
  988. ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
  989. if (amdgpu_sriov_vf(adev)) {
  990. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
  991. info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
  992. info->fw = adev->gfx.mec_fw;
  993. adev->firmware.fw_size +=
994. ALIGN(64 * PAGE_SIZE, PAGE_SIZE);
  995. }
  996. if (adev->gfx.mec2_fw) {
  997. info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
  998. info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
  999. info->fw = adev->gfx.mec2_fw;
  1000. header = (const struct common_firmware_header *)info->fw->data;
  1001. adev->firmware.fw_size +=
  1002. ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
  1003. }
  1004. }
  1005. out:
  1006. if (err) {
  1007. dev_err(adev->dev,
  1008. "gfx8: Failed to load firmware \"%s\"\n",
  1009. fw_name);
  1010. release_firmware(adev->gfx.pfp_fw);
  1011. adev->gfx.pfp_fw = NULL;
  1012. release_firmware(adev->gfx.me_fw);
  1013. adev->gfx.me_fw = NULL;
  1014. release_firmware(adev->gfx.ce_fw);
  1015. adev->gfx.ce_fw = NULL;
  1016. release_firmware(adev->gfx.rlc_fw);
  1017. adev->gfx.rlc_fw = NULL;
  1018. release_firmware(adev->gfx.mec_fw);
  1019. adev->gfx.mec_fw = NULL;
  1020. release_firmware(adev->gfx.mec2_fw);
  1021. adev->gfx.mec2_fw = NULL;
  1022. }
  1023. return err;
  1024. }
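/*
 * Build the clear state buffer (CSB): a PM4 stream bracketed by
 * PREAMBLE_BEGIN/END_CLEAR_STATE that loads the SECT_CONTEXT register
 * extents from the RLC cs_data tables plus the current raster config,
 * and ends with a CLEAR_STATE packet.
 */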
  1025. static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
  1026. volatile u32 *buffer)
  1027. {
  1028. u32 count = 0, i;
  1029. const struct cs_section_def *sect = NULL;
  1030. const struct cs_extent_def *ext = NULL;
  1031. if (adev->gfx.rlc.cs_data == NULL)
  1032. return;
  1033. if (buffer == NULL)
  1034. return;
  1035. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  1036. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  1037. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  1038. buffer[count++] = cpu_to_le32(0x80000000);
  1039. buffer[count++] = cpu_to_le32(0x80000000);
  1040. for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
  1041. for (ext = sect->section; ext->extent != NULL; ++ext) {
  1042. if (sect->id == SECT_CONTEXT) {
  1043. buffer[count++] =
  1044. cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
  1045. buffer[count++] = cpu_to_le32(ext->reg_index -
  1046. PACKET3_SET_CONTEXT_REG_START);
  1047. for (i = 0; i < ext->reg_count; i++)
  1048. buffer[count++] = cpu_to_le32(ext->extent[i]);
  1049. } else {
  1050. return;
  1051. }
  1052. }
  1053. }
  1054. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  1055. buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
  1056. PACKET3_SET_CONTEXT_REG_START);
  1057. buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
  1058. buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
  1059. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  1060. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
  1061. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
  1062. buffer[count++] = cpu_to_le32(0);
  1063. }
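/*
 * Copy the CP jump tables (JT) out of the CE/PFP/ME/MEC(/MEC2) firmware
 * images into the RLC cp_table buffer, packed back to back; Carrizo also
 * copies the MEC2 table (max_me = 5).
 */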
  1064. static void cz_init_cp_jump_table(struct amdgpu_device *adev)
  1065. {
  1066. const __le32 *fw_data;
  1067. volatile u32 *dst_ptr;
  1068. int me, i, max_me = 4;
  1069. u32 bo_offset = 0;
  1070. u32 table_offset, table_size;
  1071. if (adev->asic_type == CHIP_CARRIZO)
  1072. max_me = 5;
  1073. /* write the cp table buffer */
  1074. dst_ptr = adev->gfx.rlc.cp_table_ptr;
  1075. for (me = 0; me < max_me; me++) {
  1076. if (me == 0) {
  1077. const struct gfx_firmware_header_v1_0 *hdr =
  1078. (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
  1079. fw_data = (const __le32 *)
  1080. (adev->gfx.ce_fw->data +
  1081. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1082. table_offset = le32_to_cpu(hdr->jt_offset);
  1083. table_size = le32_to_cpu(hdr->jt_size);
  1084. } else if (me == 1) {
  1085. const struct gfx_firmware_header_v1_0 *hdr =
  1086. (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
  1087. fw_data = (const __le32 *)
  1088. (adev->gfx.pfp_fw->data +
  1089. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1090. table_offset = le32_to_cpu(hdr->jt_offset);
  1091. table_size = le32_to_cpu(hdr->jt_size);
  1092. } else if (me == 2) {
  1093. const struct gfx_firmware_header_v1_0 *hdr =
  1094. (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
  1095. fw_data = (const __le32 *)
  1096. (adev->gfx.me_fw->data +
  1097. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1098. table_offset = le32_to_cpu(hdr->jt_offset);
  1099. table_size = le32_to_cpu(hdr->jt_size);
  1100. } else if (me == 3) {
  1101. const struct gfx_firmware_header_v1_0 *hdr =
  1102. (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  1103. fw_data = (const __le32 *)
  1104. (adev->gfx.mec_fw->data +
  1105. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1106. table_offset = le32_to_cpu(hdr->jt_offset);
  1107. table_size = le32_to_cpu(hdr->jt_size);
  1108. } else if (me == 4) {
  1109. const struct gfx_firmware_header_v1_0 *hdr =
  1110. (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
  1111. fw_data = (const __le32 *)
  1112. (adev->gfx.mec2_fw->data +
  1113. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1114. table_offset = le32_to_cpu(hdr->jt_offset);
  1115. table_size = le32_to_cpu(hdr->jt_size);
  1116. }
1117. for (i = 0; i < table_size; i++) {
  1118. dst_ptr[bo_offset + i] =
  1119. cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
  1120. }
  1121. bo_offset += table_size;
  1122. }
  1123. }
  1124. static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
  1125. {
  1126. int r;
  1127. /* clear state block */
  1128. if (adev->gfx.rlc.clear_state_obj) {
  1129. r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
  1130. if (unlikely(r != 0))
  1131. dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
  1132. amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
  1133. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  1134. amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
  1135. adev->gfx.rlc.clear_state_obj = NULL;
  1136. }
  1137. /* jump table block */
  1138. if (adev->gfx.rlc.cp_table_obj) {
  1139. r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
  1140. if (unlikely(r != 0))
  1141. dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
  1142. amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
  1143. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  1144. amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
  1145. adev->gfx.rlc.cp_table_obj = NULL;
  1146. }
  1147. }
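/*
 * Allocate and fill the RLC buffer objects: a VRAM BO holding the clear
 * state buffer produced by gfx_v8_0_get_csb_buffer(), plus, on Carrizo and
 * Stoney, a second BO holding the CP jump tables and GDS backup space.
 */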
  1148. static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
  1149. {
  1150. volatile u32 *dst_ptr;
  1151. u32 dws;
  1152. const struct cs_section_def *cs_data;
  1153. int r;
  1154. adev->gfx.rlc.cs_data = vi_cs_data;
  1155. cs_data = adev->gfx.rlc.cs_data;
  1156. if (cs_data) {
  1157. /* clear state block */
  1158. adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
  1159. if (adev->gfx.rlc.clear_state_obj == NULL) {
  1160. r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
  1161. AMDGPU_GEM_DOMAIN_VRAM,
  1162. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
  1163. AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
  1164. NULL, NULL,
  1165. &adev->gfx.rlc.clear_state_obj);
  1166. if (r) {
1167. dev_warn(adev->dev, "(%d) create RLC cbs bo failed\n", r);
  1168. gfx_v8_0_rlc_fini(adev);
  1169. return r;
  1170. }
  1171. }
  1172. r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
  1173. if (unlikely(r != 0)) {
  1174. gfx_v8_0_rlc_fini(adev);
  1175. return r;
  1176. }
  1177. r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
  1178. &adev->gfx.rlc.clear_state_gpu_addr);
  1179. if (r) {
  1180. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  1181. dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
  1182. gfx_v8_0_rlc_fini(adev);
  1183. return r;
  1184. }
  1185. r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
  1186. if (r) {
  1187. dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
  1188. gfx_v8_0_rlc_fini(adev);
  1189. return r;
  1190. }
  1191. /* set up the cs buffer */
  1192. dst_ptr = adev->gfx.rlc.cs_ptr;
  1193. gfx_v8_0_get_csb_buffer(adev, dst_ptr);
  1194. amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
  1195. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  1196. }
  1197. if ((adev->asic_type == CHIP_CARRIZO) ||
  1198. (adev->asic_type == CHIP_STONEY)) {
  1199. adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
  1200. if (adev->gfx.rlc.cp_table_obj == NULL) {
  1201. r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
  1202. AMDGPU_GEM_DOMAIN_VRAM,
  1203. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
  1204. AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
  1205. NULL, NULL,
  1206. &adev->gfx.rlc.cp_table_obj);
  1207. if (r) {
  1208. dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
  1209. return r;
  1210. }
  1211. }
  1212. r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
  1213. if (unlikely(r != 0)) {
  1214. dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
  1215. return r;
  1216. }
  1217. r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
  1218. &adev->gfx.rlc.cp_table_gpu_addr);
  1219. if (r) {
  1220. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  1221. dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
  1222. return r;
  1223. }
  1224. r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
  1225. if (r) {
  1226. dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
  1227. return r;
  1228. }
  1229. cz_init_cp_jump_table(adev);
  1230. amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
  1231. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  1232. }
  1233. return 0;
  1234. }
  1235. static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
  1236. {
  1237. int r;
  1238. if (adev->gfx.mec.hpd_eop_obj) {
  1239. r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
  1240. if (unlikely(r != 0))
  1241. dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
  1242. amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
  1243. amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
  1244. amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
  1245. adev->gfx.mec.hpd_eop_obj = NULL;
  1246. }
  1247. }
  1248. #define MEC_HPD_SIZE 2048
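/*
 * Allocate and zero the HPD EOP buffer used by the compute microengine
 * (MEC). Only one MEC with one pipe is managed here (the remaining pipes
 * are left to the KFD), which yields eight driver-owned compute queues.
 */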
  1249. static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
  1250. {
  1251. int r;
  1252. u32 *hpd;
  1253. /*
  1254. * we assign only 1 pipe because all other pipes will
  1255. * be handled by KFD
  1256. */
  1257. adev->gfx.mec.num_mec = 1;
  1258. adev->gfx.mec.num_pipe = 1;
  1259. adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
  1260. if (adev->gfx.mec.hpd_eop_obj == NULL) {
  1261. r = amdgpu_bo_create(adev,
1262. adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
  1263. PAGE_SIZE, true,
  1264. AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
  1265. &adev->gfx.mec.hpd_eop_obj);
  1266. if (r) {
1267. dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
  1268. return r;
  1269. }
  1270. }
  1271. r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
  1272. if (unlikely(r != 0)) {
  1273. gfx_v8_0_mec_fini(adev);
  1274. return r;
  1275. }
  1276. r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
  1277. &adev->gfx.mec.hpd_eop_gpu_addr);
  1278. if (r) {
1279. dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
  1280. gfx_v8_0_mec_fini(adev);
  1281. return r;
  1282. }
  1283. r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
  1284. if (r) {
1285. dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
  1286. gfx_v8_0_mec_fini(adev);
  1287. return r;
  1288. }
1289. memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
  1290. amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
  1291. amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
  1292. return 0;
  1293. }
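/*
 * Pre-assembled GCN compute shaders and dispatch register tables for the
 * EDC GPR workaround below; the dword arrays are raw shader machine code
 * intended to touch the VGPR and SGPR register files so their ECC state
 * starts out initialized.
 */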
  1294. static const u32 vgpr_init_compute_shader[] =
  1295. {
  1296. 0x7e000209, 0x7e020208,
  1297. 0x7e040207, 0x7e060206,
  1298. 0x7e080205, 0x7e0a0204,
  1299. 0x7e0c0203, 0x7e0e0202,
  1300. 0x7e100201, 0x7e120200,
  1301. 0x7e140209, 0x7e160208,
  1302. 0x7e180207, 0x7e1a0206,
  1303. 0x7e1c0205, 0x7e1e0204,
  1304. 0x7e200203, 0x7e220202,
  1305. 0x7e240201, 0x7e260200,
  1306. 0x7e280209, 0x7e2a0208,
  1307. 0x7e2c0207, 0x7e2e0206,
  1308. 0x7e300205, 0x7e320204,
  1309. 0x7e340203, 0x7e360202,
  1310. 0x7e380201, 0x7e3a0200,
  1311. 0x7e3c0209, 0x7e3e0208,
  1312. 0x7e400207, 0x7e420206,
  1313. 0x7e440205, 0x7e460204,
  1314. 0x7e480203, 0x7e4a0202,
  1315. 0x7e4c0201, 0x7e4e0200,
  1316. 0x7e500209, 0x7e520208,
  1317. 0x7e540207, 0x7e560206,
  1318. 0x7e580205, 0x7e5a0204,
  1319. 0x7e5c0203, 0x7e5e0202,
  1320. 0x7e600201, 0x7e620200,
  1321. 0x7e640209, 0x7e660208,
  1322. 0x7e680207, 0x7e6a0206,
  1323. 0x7e6c0205, 0x7e6e0204,
  1324. 0x7e700203, 0x7e720202,
  1325. 0x7e740201, 0x7e760200,
  1326. 0x7e780209, 0x7e7a0208,
  1327. 0x7e7c0207, 0x7e7e0206,
  1328. 0xbf8a0000, 0xbf810000,
  1329. };
  1330. static const u32 sgpr_init_compute_shader[] =
  1331. {
  1332. 0xbe8a0100, 0xbe8c0102,
  1333. 0xbe8e0104, 0xbe900106,
  1334. 0xbe920108, 0xbe940100,
  1335. 0xbe960102, 0xbe980104,
  1336. 0xbe9a0106, 0xbe9c0108,
  1337. 0xbe9e0100, 0xbea00102,
  1338. 0xbea20104, 0xbea40106,
  1339. 0xbea60108, 0xbea80100,
  1340. 0xbeaa0102, 0xbeac0104,
  1341. 0xbeae0106, 0xbeb00108,
  1342. 0xbeb20100, 0xbeb40102,
  1343. 0xbeb60104, 0xbeb80106,
  1344. 0xbeba0108, 0xbebc0100,
  1345. 0xbebe0102, 0xbec00104,
  1346. 0xbec20106, 0xbec40108,
  1347. 0xbec60100, 0xbec80102,
  1348. 0xbee60004, 0xbee70005,
  1349. 0xbeea0006, 0xbeeb0007,
  1350. 0xbee80008, 0xbee90009,
  1351. 0xbefc0000, 0xbf8a0000,
  1352. 0xbf810000, 0x00000000,
  1353. };
  1354. static const u32 vgpr_init_regs[] =
  1355. {
  1356. mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
  1357. mmCOMPUTE_RESOURCE_LIMITS, 0,
  1358. mmCOMPUTE_NUM_THREAD_X, 256*4,
  1359. mmCOMPUTE_NUM_THREAD_Y, 1,
  1360. mmCOMPUTE_NUM_THREAD_Z, 1,
  1361. mmCOMPUTE_PGM_RSRC2, 20,
  1362. mmCOMPUTE_USER_DATA_0, 0xedcedc00,
  1363. mmCOMPUTE_USER_DATA_1, 0xedcedc01,
  1364. mmCOMPUTE_USER_DATA_2, 0xedcedc02,
  1365. mmCOMPUTE_USER_DATA_3, 0xedcedc03,
  1366. mmCOMPUTE_USER_DATA_4, 0xedcedc04,
  1367. mmCOMPUTE_USER_DATA_5, 0xedcedc05,
  1368. mmCOMPUTE_USER_DATA_6, 0xedcedc06,
  1369. mmCOMPUTE_USER_DATA_7, 0xedcedc07,
  1370. mmCOMPUTE_USER_DATA_8, 0xedcedc08,
  1371. mmCOMPUTE_USER_DATA_9, 0xedcedc09,
  1372. };
  1373. static const u32 sgpr1_init_regs[] =
  1374. {
  1375. mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
  1376. mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
  1377. mmCOMPUTE_NUM_THREAD_X, 256*5,
  1378. mmCOMPUTE_NUM_THREAD_Y, 1,
  1379. mmCOMPUTE_NUM_THREAD_Z, 1,
  1380. mmCOMPUTE_PGM_RSRC2, 20,
  1381. mmCOMPUTE_USER_DATA_0, 0xedcedc00,
  1382. mmCOMPUTE_USER_DATA_1, 0xedcedc01,
  1383. mmCOMPUTE_USER_DATA_2, 0xedcedc02,
  1384. mmCOMPUTE_USER_DATA_3, 0xedcedc03,
  1385. mmCOMPUTE_USER_DATA_4, 0xedcedc04,
  1386. mmCOMPUTE_USER_DATA_5, 0xedcedc05,
  1387. mmCOMPUTE_USER_DATA_6, 0xedcedc06,
  1388. mmCOMPUTE_USER_DATA_7, 0xedcedc07,
  1389. mmCOMPUTE_USER_DATA_8, 0xedcedc08,
  1390. mmCOMPUTE_USER_DATA_9, 0xedcedc09,
  1391. };
  1392. static const u32 sgpr2_init_regs[] =
  1393. {
  1394. mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
  1395. mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
  1396. mmCOMPUTE_NUM_THREAD_X, 256*5,
  1397. mmCOMPUTE_NUM_THREAD_Y, 1,
  1398. mmCOMPUTE_NUM_THREAD_Z, 1,
  1399. mmCOMPUTE_PGM_RSRC2, 20,
  1400. mmCOMPUTE_USER_DATA_0, 0xedcedc00,
  1401. mmCOMPUTE_USER_DATA_1, 0xedcedc01,
  1402. mmCOMPUTE_USER_DATA_2, 0xedcedc02,
  1403. mmCOMPUTE_USER_DATA_3, 0xedcedc03,
  1404. mmCOMPUTE_USER_DATA_4, 0xedcedc04,
  1405. mmCOMPUTE_USER_DATA_5, 0xedcedc05,
  1406. mmCOMPUTE_USER_DATA_6, 0xedcedc06,
  1407. mmCOMPUTE_USER_DATA_7, 0xedcedc07,
  1408. mmCOMPUTE_USER_DATA_8, 0xedcedc08,
  1409. mmCOMPUTE_USER_DATA_9, 0xedcedc09,
  1410. };
  1411. static const u32 sec_ded_counter_registers[] =
  1412. {
  1413. mmCPC_EDC_ATC_CNT,
  1414. mmCPC_EDC_SCRATCH_CNT,
  1415. mmCPC_EDC_UCODE_CNT,
  1416. mmCPF_EDC_ATC_CNT,
  1417. mmCPF_EDC_ROQ_CNT,
  1418. mmCPF_EDC_TAG_CNT,
  1419. mmCPG_EDC_ATC_CNT,
  1420. mmCPG_EDC_DMA_CNT,
  1421. mmCPG_EDC_TAG_CNT,
  1422. mmDC_EDC_CSINVOC_CNT,
  1423. mmDC_EDC_RESTORE_CNT,
  1424. mmDC_EDC_STATE_CNT,
  1425. mmGDS_EDC_CNT,
  1426. mmGDS_EDC_GRBM_CNT,
  1427. mmGDS_EDC_OA_DED,
  1428. mmSPI_EDC_CNT,
  1429. mmSQC_ATC_EDC_GATCL1_CNT,
  1430. mmSQC_EDC_CNT,
  1431. mmSQ_EDC_DED_CNT,
  1432. mmSQ_EDC_INFO,
  1433. mmSQ_EDC_SEC_CNT,
  1434. mmTCC_EDC_CNT,
  1435. mmTCP_ATC_EDC_GATCL1_CNT,
  1436. mmTCP_EDC_CNT,
  1437. mmTD_EDC_CNT
  1438. };
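/*
 * EDC GPR workaround (Carrizo only): temporarily clear GB_EDC_MODE, build
 * one IB that dispatches the VGPR shader once and the SGPR shader twice
 * with different CU masks, wait for it to complete, then re-enable EDC and
 * read the SEC/DED counter registers back to clear them.
 */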
  1439. static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
  1440. {
  1441. struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
  1442. struct amdgpu_ib ib;
  1443. struct fence *f = NULL;
  1444. int r, i;
  1445. u32 tmp;
  1446. unsigned total_size, vgpr_offset, sgpr_offset;
  1447. u64 gpu_addr;
  1448. /* only supported on CZ */
  1449. if (adev->asic_type != CHIP_CARRIZO)
  1450. return 0;
  1451. /* bail if the compute ring is not ready */
  1452. if (!ring->ready)
  1453. return 0;
  1454. tmp = RREG32(mmGB_EDC_MODE);
  1455. WREG32(mmGB_EDC_MODE, 0);
  1456. total_size =
  1457. (((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
  1458. total_size +=
  1459. (((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
  1460. total_size +=
  1461. (((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
  1462. total_size = ALIGN(total_size, 256);
  1463. vgpr_offset = total_size;
  1464. total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
  1465. sgpr_offset = total_size;
  1466. total_size += sizeof(sgpr_init_compute_shader);
  1467. /* allocate an indirect buffer to put the commands in */
  1468. memset(&ib, 0, sizeof(ib));
  1469. r = amdgpu_ib_get(adev, NULL, total_size, &ib);
  1470. if (r) {
  1471. DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
  1472. return r;
  1473. }
  1474. /* load the compute shaders */
  1475. for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
  1476. ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
  1477. for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
  1478. ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
  1479. /* init the ib length to 0 */
  1480. ib.length_dw = 0;
  1481. /* VGPR */
  1482. /* write the register state for the compute dispatch */
  1483. for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
  1484. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
  1485. ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
  1486. ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
  1487. }
  1488. /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
  1489. gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
  1490. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
  1491. ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
  1492. ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
  1493. ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
  1494. /* write dispatch packet */
  1495. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
  1496. ib.ptr[ib.length_dw++] = 8; /* x */
  1497. ib.ptr[ib.length_dw++] = 1; /* y */
  1498. ib.ptr[ib.length_dw++] = 1; /* z */
  1499. ib.ptr[ib.length_dw++] =
  1500. REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
  1501. /* write CS partial flush packet */
  1502. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
  1503. ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
  1504. /* SGPR1 */
  1505. /* write the register state for the compute dispatch */
  1506. for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
  1507. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
  1508. ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
  1509. ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
  1510. }
  1511. /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
  1512. gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
  1513. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
  1514. ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
  1515. ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
  1516. ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
  1517. /* write dispatch packet */
  1518. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
  1519. ib.ptr[ib.length_dw++] = 8; /* x */
  1520. ib.ptr[ib.length_dw++] = 1; /* y */
  1521. ib.ptr[ib.length_dw++] = 1; /* z */
  1522. ib.ptr[ib.length_dw++] =
  1523. REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
  1524. /* write CS partial flush packet */
  1525. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
  1526. ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
  1527. /* SGPR2 */
  1528. /* write the register state for the compute dispatch */
  1529. for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
  1530. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
  1531. ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
  1532. ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
  1533. }
  1534. /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
  1535. gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
  1536. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
  1537. ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
  1538. ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
  1539. ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
  1540. /* write dispatch packet */
  1541. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
  1542. ib.ptr[ib.length_dw++] = 8; /* x */
  1543. ib.ptr[ib.length_dw++] = 1; /* y */
  1544. ib.ptr[ib.length_dw++] = 1; /* z */
  1545. ib.ptr[ib.length_dw++] =
  1546. REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
  1547. /* write CS partial flush packet */
  1548. ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
  1549. ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
1550. /* schedule the ib on the ring */
  1551. r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
  1552. if (r) {
  1553. DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
  1554. goto fail;
  1555. }
  1556. /* wait for the GPU to finish processing the IB */
  1557. r = fence_wait(f, false);
  1558. if (r) {
  1559. DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
  1560. goto fail;
  1561. }
  1562. tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
  1563. tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
  1564. WREG32(mmGB_EDC_MODE, tmp);
  1565. tmp = RREG32(mmCC_GC_EDC_CONFIG);
  1566. tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
  1567. WREG32(mmCC_GC_EDC_CONFIG, tmp);
  1568. /* read back registers to clear the counters */
  1569. for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
  1570. RREG32(sec_ded_counter_registers[i]);
  1571. fail:
  1572. amdgpu_ib_free(adev, &ib, NULL);
  1573. fence_put(f);
  1574. return r;
  1575. }
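/*
 * Fill in the per-ASIC gfx configuration (shader engines, CUs, RBs, caches,
 * FIFO sizes) and derive gb_addr_config, taking the memory row size from
 * the DIMM address-map fuses on APUs or from MC_ARB_RAMCFG on dGPUs.
 */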
  1576. static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
  1577. {
  1578. u32 gb_addr_config;
  1579. u32 mc_shared_chmap, mc_arb_ramcfg;
  1580. u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
  1581. u32 tmp;
  1582. int ret;
  1583. switch (adev->asic_type) {
  1584. case CHIP_TOPAZ:
  1585. adev->gfx.config.max_shader_engines = 1;
  1586. adev->gfx.config.max_tile_pipes = 2;
  1587. adev->gfx.config.max_cu_per_sh = 6;
  1588. adev->gfx.config.max_sh_per_se = 1;
  1589. adev->gfx.config.max_backends_per_se = 2;
  1590. adev->gfx.config.max_texture_channel_caches = 2;
  1591. adev->gfx.config.max_gprs = 256;
  1592. adev->gfx.config.max_gs_threads = 32;
  1593. adev->gfx.config.max_hw_contexts = 8;
  1594. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1595. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1596. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1597. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1598. gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
  1599. break;
  1600. case CHIP_FIJI:
  1601. adev->gfx.config.max_shader_engines = 4;
  1602. adev->gfx.config.max_tile_pipes = 16;
  1603. adev->gfx.config.max_cu_per_sh = 16;
  1604. adev->gfx.config.max_sh_per_se = 1;
  1605. adev->gfx.config.max_backends_per_se = 4;
  1606. adev->gfx.config.max_texture_channel_caches = 16;
  1607. adev->gfx.config.max_gprs = 256;
  1608. adev->gfx.config.max_gs_threads = 32;
  1609. adev->gfx.config.max_hw_contexts = 8;
  1610. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1611. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1612. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1613. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1614. gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
  1615. break;
  1616. case CHIP_POLARIS11:
  1617. ret = amdgpu_atombios_get_gfx_info(adev);
  1618. if (ret)
  1619. return ret;
  1620. adev->gfx.config.max_gprs = 256;
  1621. adev->gfx.config.max_gs_threads = 32;
  1622. adev->gfx.config.max_hw_contexts = 8;
  1623. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1624. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1625. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1626. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1627. gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
  1628. break;
  1629. case CHIP_POLARIS10:
  1630. ret = amdgpu_atombios_get_gfx_info(adev);
  1631. if (ret)
  1632. return ret;
  1633. adev->gfx.config.max_gprs = 256;
  1634. adev->gfx.config.max_gs_threads = 32;
  1635. adev->gfx.config.max_hw_contexts = 8;
  1636. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1637. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1638. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1639. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1640. gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
  1641. break;
  1642. case CHIP_TONGA:
  1643. adev->gfx.config.max_shader_engines = 4;
  1644. adev->gfx.config.max_tile_pipes = 8;
  1645. adev->gfx.config.max_cu_per_sh = 8;
  1646. adev->gfx.config.max_sh_per_se = 1;
  1647. adev->gfx.config.max_backends_per_se = 2;
  1648. adev->gfx.config.max_texture_channel_caches = 8;
  1649. adev->gfx.config.max_gprs = 256;
  1650. adev->gfx.config.max_gs_threads = 32;
  1651. adev->gfx.config.max_hw_contexts = 8;
  1652. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1653. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1654. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1655. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1656. gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
  1657. break;
  1658. case CHIP_CARRIZO:
  1659. adev->gfx.config.max_shader_engines = 1;
  1660. adev->gfx.config.max_tile_pipes = 2;
  1661. adev->gfx.config.max_sh_per_se = 1;
  1662. adev->gfx.config.max_backends_per_se = 2;
  1663. switch (adev->pdev->revision) {
  1664. case 0xc4:
  1665. case 0x84:
  1666. case 0xc8:
  1667. case 0xcc:
  1668. case 0xe1:
  1669. case 0xe3:
  1670. /* B10 */
  1671. adev->gfx.config.max_cu_per_sh = 8;
  1672. break;
  1673. case 0xc5:
  1674. case 0x81:
  1675. case 0x85:
  1676. case 0xc9:
  1677. case 0xcd:
  1678. case 0xe2:
  1679. case 0xe4:
  1680. /* B8 */
  1681. adev->gfx.config.max_cu_per_sh = 6;
  1682. break;
  1683. case 0xc6:
  1684. case 0xca:
  1685. case 0xce:
  1686. case 0x88:
  1687. /* B6 */
  1688. adev->gfx.config.max_cu_per_sh = 6;
  1689. break;
  1690. case 0xc7:
  1691. case 0x87:
  1692. case 0xcb:
  1693. case 0xe5:
  1694. case 0x89:
  1695. default:
  1696. /* B4 */
  1697. adev->gfx.config.max_cu_per_sh = 4;
  1698. break;
  1699. }
  1700. adev->gfx.config.max_texture_channel_caches = 2;
  1701. adev->gfx.config.max_gprs = 256;
  1702. adev->gfx.config.max_gs_threads = 32;
  1703. adev->gfx.config.max_hw_contexts = 8;
  1704. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1705. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1706. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1707. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1708. gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
  1709. break;
  1710. case CHIP_STONEY:
  1711. adev->gfx.config.max_shader_engines = 1;
  1712. adev->gfx.config.max_tile_pipes = 2;
  1713. adev->gfx.config.max_sh_per_se = 1;
  1714. adev->gfx.config.max_backends_per_se = 1;
  1715. switch (adev->pdev->revision) {
  1716. case 0xc0:
  1717. case 0xc1:
  1718. case 0xc2:
  1719. case 0xc4:
  1720. case 0xc8:
  1721. case 0xc9:
  1722. adev->gfx.config.max_cu_per_sh = 3;
  1723. break;
  1724. case 0xd0:
  1725. case 0xd1:
  1726. case 0xd2:
  1727. default:
  1728. adev->gfx.config.max_cu_per_sh = 2;
  1729. break;
  1730. }
  1731. adev->gfx.config.max_texture_channel_caches = 2;
  1732. adev->gfx.config.max_gprs = 256;
  1733. adev->gfx.config.max_gs_threads = 16;
  1734. adev->gfx.config.max_hw_contexts = 8;
  1735. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1736. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1737. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1738. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1739. gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
  1740. break;
  1741. default:
  1742. adev->gfx.config.max_shader_engines = 2;
  1743. adev->gfx.config.max_tile_pipes = 4;
  1744. adev->gfx.config.max_cu_per_sh = 2;
  1745. adev->gfx.config.max_sh_per_se = 1;
  1746. adev->gfx.config.max_backends_per_se = 2;
  1747. adev->gfx.config.max_texture_channel_caches = 4;
  1748. adev->gfx.config.max_gprs = 256;
  1749. adev->gfx.config.max_gs_threads = 32;
  1750. adev->gfx.config.max_hw_contexts = 8;
  1751. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  1752. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  1753. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  1754. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  1755. gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
  1756. break;
  1757. }
  1758. mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
  1759. adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
  1760. mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
  1761. adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
  1762. adev->gfx.config.mem_max_burst_length_bytes = 256;
  1763. if (adev->flags & AMD_IS_APU) {
  1764. /* Get memory bank mapping mode. */
  1765. tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
  1766. dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
  1767. dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
  1768. tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
  1769. dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
  1770. dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
1771. /* Validate the settings in case only one DIMM is installed. */
  1772. if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
  1773. dimm00_addr_map = 0;
  1774. if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
  1775. dimm01_addr_map = 0;
  1776. if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
  1777. dimm10_addr_map = 0;
  1778. if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
  1779. dimm11_addr_map = 0;
1780. /* If the DIMM address map is 8GB, the ROW size should be 2KB; otherwise 1KB. */
1781. /* If ROW size(DIMM1) != ROW size(DIMM0), use the larger ROW size. */
  1782. if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
  1783. adev->gfx.config.mem_row_size_in_kb = 2;
  1784. else
  1785. adev->gfx.config.mem_row_size_in_kb = 1;
  1786. } else {
  1787. tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
  1788. adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
  1789. if (adev->gfx.config.mem_row_size_in_kb > 4)
  1790. adev->gfx.config.mem_row_size_in_kb = 4;
  1791. }
  1792. adev->gfx.config.shader_engine_tile_size = 32;
  1793. adev->gfx.config.num_gpus = 1;
  1794. adev->gfx.config.multi_gpu_tile_size = 64;
  1795. /* fix up row size */
  1796. switch (adev->gfx.config.mem_row_size_in_kb) {
  1797. case 1:
  1798. default:
  1799. gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
  1800. break;
  1801. case 2:
  1802. gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
  1803. break;
  1804. case 4:
  1805. gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
  1806. break;
  1807. }
  1808. adev->gfx.config.gb_addr_config = gb_addr_config;
  1809. return 0;
  1810. }
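/*
 * Software init: register the EOP, privileged-register and privileged-
 * instruction interrupt sources, load microcode, create the RLC and MEC
 * buffer objects, bring up the gfx and compute rings, and reserve the
 * GDS/GWS/OA partitions.
 */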
  1811. static int gfx_v8_0_sw_init(void *handle)
  1812. {
  1813. int i, r;
  1814. struct amdgpu_ring *ring;
  1815. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1816. /* EOP Event */
  1817. r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
  1818. if (r)
  1819. return r;
  1820. /* Privileged reg */
  1821. r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
  1822. if (r)
  1823. return r;
  1824. /* Privileged inst */
  1825. r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
  1826. if (r)
  1827. return r;
  1828. adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
  1829. gfx_v8_0_scratch_init(adev);
  1830. r = gfx_v8_0_init_microcode(adev);
  1831. if (r) {
  1832. DRM_ERROR("Failed to load gfx firmware!\n");
  1833. return r;
  1834. }
  1835. r = gfx_v8_0_rlc_init(adev);
  1836. if (r) {
  1837. DRM_ERROR("Failed to init rlc BOs!\n");
  1838. return r;
  1839. }
  1840. r = gfx_v8_0_mec_init(adev);
  1841. if (r) {
  1842. DRM_ERROR("Failed to init MEC BOs!\n");
  1843. return r;
  1844. }
  1845. /* set up the gfx ring */
  1846. for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
  1847. ring = &adev->gfx.gfx_ring[i];
  1848. ring->ring_obj = NULL;
  1849. sprintf(ring->name, "gfx");
  1850. /* no gfx doorbells on iceland */
  1851. if (adev->asic_type != CHIP_TOPAZ) {
  1852. ring->use_doorbell = true;
  1853. ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
  1854. }
  1855. r = amdgpu_ring_init(adev, ring, 1024,
  1856. PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
  1857. &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
  1858. AMDGPU_RING_TYPE_GFX);
  1859. if (r)
  1860. return r;
  1861. }
  1862. /* set up the compute queues */
  1863. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  1864. unsigned irq_type;
  1865. /* max 32 queues per MEC */
  1866. if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
  1867. DRM_ERROR("Too many (%d) compute rings!\n", i);
  1868. break;
  1869. }
  1870. ring = &adev->gfx.compute_ring[i];
  1871. ring->ring_obj = NULL;
  1872. ring->use_doorbell = true;
  1873. ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
  1874. ring->me = 1; /* first MEC */
  1875. ring->pipe = i / 8;
  1876. ring->queue = i % 8;
  1877. sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
  1878. irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
  1879. /* type-2 packets are deprecated on MEC, use type-3 instead */
  1880. r = amdgpu_ring_init(adev, ring, 1024,
  1881. PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
  1882. &adev->gfx.eop_irq, irq_type,
  1883. AMDGPU_RING_TYPE_COMPUTE);
  1884. if (r)
  1885. return r;
  1886. }
1887. /* reserve the GDS, GWS and OA resources for gfx */
  1888. r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
  1889. PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
  1890. &adev->gds.gds_gfx_bo, NULL, NULL);
  1891. if (r)
  1892. return r;
  1893. r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
  1894. PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
  1895. &adev->gds.gws_gfx_bo, NULL, NULL);
  1896. if (r)
  1897. return r;
  1898. r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
  1899. PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
  1900. &adev->gds.oa_gfx_bo, NULL, NULL);
  1901. if (r)
  1902. return r;
  1903. adev->gfx.ce_ram_size = 0x8000;
  1904. r = gfx_v8_0_gpu_early_init(adev);
  1905. if (r)
  1906. return r;
  1907. return 0;
  1908. }
  1909. static int gfx_v8_0_sw_fini(void *handle)
  1910. {
  1911. int i;
  1912. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1913. amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
  1914. amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
  1915. amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
  1916. for (i = 0; i < adev->gfx.num_gfx_rings; i++)
  1917. amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
  1918. for (i = 0; i < adev->gfx.num_compute_rings; i++)
  1919. amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
  1920. gfx_v8_0_mec_fini(adev);
  1921. gfx_v8_0_rlc_fini(adev);
  1922. gfx_v8_0_free_microcode(adev);
  1923. return 0;
  1924. }
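/*
 * Populate tile_mode_array and macrotile_mode_array with the per-ASIC
 * surface tiling descriptors used to program the GB_TILE_MODEn and
 * GB_MACROTILE_MODEn registers.
 */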
  1925. static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
  1926. {
  1927. uint32_t *modearray, *mod2array;
  1928. const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
  1929. const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
  1930. u32 reg_offset;
  1931. modearray = adev->gfx.config.tile_mode_array;
  1932. mod2array = adev->gfx.config.macrotile_mode_array;
  1933. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  1934. modearray[reg_offset] = 0;
  1935. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  1936. mod2array[reg_offset] = 0;
  1937. switch (adev->asic_type) {
  1938. case CHIP_TOPAZ:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
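	/*
	 * The discrete VI parts below program the full 0-30 tile mode table.
	 * Entries 7, 12, 17, 23 and 30 repeat the preceding PRT modes with a
	 * reduced ADDR_SURF_P4_16x16 pipe config (presumably to keep PRT
	 * surfaces compatible across pipe configurations).
	 */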
	case CHIP_FIJI:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
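	/* Tonga: same table layout as Fiji, but on an ADDR_SURF_P8_32x32_16x16 pipe config. */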
	case CHIP_TONGA:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
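	/* Polaris11: every tile mode uses the 4-pipe ADDR_SURF_P4_16x16 config. */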
	case CHIP_POLARIS11:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
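	/* Polaris10: same ADDR_SURF_P8_32x32_16x16 pipe config as Tonga. */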
	case CHIP_POLARIS10:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
			NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
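	/*
	 * Stoney: 2-pipe (ADDR_SURF_P2) layout.  Tile modes 7, 12, 17 and 23
	 * are never programmed, matching the skip in the WREG32 loop below.
	 */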
	case CHIP_STONEY:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
		break;
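	/* Unknown ASICs warn and fall through to the Carrizo (ADDR_SURF_P2) tables. */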
	default:
		dev_warn(adev->dev,
			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
			 adev->asic_type);
	case CHIP_CARRIZO:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			PIPE_CONFIG(ADDR_SURF_P2) |
			MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
			NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
			MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
			NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
			BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  3173. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  3174. NUM_BANKS(ADDR_SURF_8_BANK));
  3175. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  3176. if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
  3177. reg_offset != 23)
  3178. WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
  3179. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  3180. if (reg_offset != 7)
  3181. WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
  3182. break;
  3183. }
  3184. }
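/*
 * gfx_v8_0_select_se_sh() steers subsequent register accesses via
 * GRBM_GFX_INDEX.  Passing 0xffffffff for se_num, sh_num or instance
 * selects broadcast writes for that field instead of a single index.
 */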
  3185. static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
  3186. u32 se_num, u32 sh_num, u32 instance)
  3187. {
  3188. u32 data;
  3189. if (instance == 0xffffffff)
  3190. data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
  3191. else
  3192. data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
  3193. if (se_num == 0xffffffff)
  3194. data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
  3195. else
  3196. data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
  3197. if (sh_num == 0xffffffff)
  3198. data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
  3199. else
  3200. data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
  3201. WREG32(mmGRBM_GFX_INDEX, data);
  3202. }
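/* Build a mask of the low bit_width bits, e.g. bit_width = 4 -> 0x0000000f */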
  3203. static u32 gfx_v8_0_create_bitmask(u32 bit_width)
  3204. {
  3205. return (u32)((1ULL << bit_width) - 1);
  3206. }
  3207. static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
  3208. {
  3209. u32 data, mask;
  3210. data = RREG32(mmCC_RB_BACKEND_DISABLE) |
  3211. RREG32(mmGC_USER_RB_BACKEND_DISABLE);
  3212. data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
  3213. mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
  3214. adev->gfx.config.max_sh_per_se);
  3215. return (~data) & mask;
  3216. }
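/*
 * Per-ASIC default PA_SC_RASTER_CONFIG / PA_SC_RASTER_CONFIG_1 values,
 * OR-ed into the words supplied by the caller.
 */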
  3217. static void
  3218. gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
  3219. {
  3220. switch (adev->asic_type) {
  3221. case CHIP_FIJI:
  3222. *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
  3223. RB_XSEL2(1) | PKR_MAP(2) |
  3224. PKR_XSEL(1) | PKR_YSEL(1) |
  3225. SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
  3226. *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
  3227. SE_PAIR_YSEL(2);
  3228. break;
  3229. case CHIP_TONGA:
  3230. case CHIP_POLARIS10:
  3231. *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
  3232. SE_XSEL(1) | SE_YSEL(1);
  3233. *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
  3234. SE_PAIR_YSEL(2);
  3235. break;
  3236. case CHIP_TOPAZ:
  3237. case CHIP_CARRIZO:
  3238. *rconf |= RB_MAP_PKR0(2);
  3239. *rconf1 |= 0x0;
  3240. break;
  3241. case CHIP_POLARIS11:
  3242. *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
  3243. SE_XSEL(1) | SE_YSEL(1);
  3244. *rconf1 |= 0x0;
  3245. break;
  3246. case CHIP_STONEY:
  3247. *rconf |= 0x0;
  3248. *rconf1 |= 0x0;
  3249. break;
  3250. default:
  3251. DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
  3252. break;
  3253. }
  3254. }
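/*
 * For parts with harvested RBs, rewrite the raster config per shader engine
 * so that the SE/PKR/RB map fields only reference backends present in
 * rb_mask, then restore broadcast mode on GRBM_GFX_INDEX.
 */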
  3255. static void
  3256. gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
  3257. u32 raster_config, u32 raster_config_1,
  3258. unsigned rb_mask, unsigned num_rb)
  3259. {
  3260. unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
  3261. unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
  3262. unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
  3263. unsigned rb_per_se = num_rb / num_se;
  3264. unsigned se_mask[4];
  3265. unsigned se;
  3266. se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
  3267. se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
  3268. se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
  3269. se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
  3270. WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
  3271. WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
  3272. WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
  3273. if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
  3274. (!se_mask[2] && !se_mask[3]))) {
  3275. raster_config_1 &= ~SE_PAIR_MAP_MASK;
  3276. if (!se_mask[0] && !se_mask[1]) {
  3277. raster_config_1 |=
  3278. SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
  3279. } else {
  3280. raster_config_1 |=
  3281. SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
  3282. }
  3283. }
  3284. for (se = 0; se < num_se; se++) {
  3285. unsigned raster_config_se = raster_config;
  3286. unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
  3287. unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
  3288. int idx = (se / 2) * 2;
  3289. if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
  3290. raster_config_se &= ~SE_MAP_MASK;
  3291. if (!se_mask[idx]) {
  3292. raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
  3293. } else {
  3294. raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
  3295. }
  3296. }
  3297. pkr0_mask &= rb_mask;
  3298. pkr1_mask &= rb_mask;
  3299. if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
  3300. raster_config_se &= ~PKR_MAP_MASK;
  3301. if (!pkr0_mask) {
  3302. raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
  3303. } else {
  3304. raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
  3305. }
  3306. }
  3307. if (rb_per_se >= 2) {
  3308. unsigned rb0_mask = 1 << (se * rb_per_se);
  3309. unsigned rb1_mask = rb0_mask << 1;
  3310. rb0_mask &= rb_mask;
  3311. rb1_mask &= rb_mask;
  3312. if (!rb0_mask || !rb1_mask) {
  3313. raster_config_se &= ~RB_MAP_PKR0_MASK;
  3314. if (!rb0_mask) {
  3315. raster_config_se |=
  3316. RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
  3317. } else {
  3318. raster_config_se |=
  3319. RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
  3320. }
  3321. }
  3322. if (rb_per_se > 2) {
  3323. rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
  3324. rb1_mask = rb0_mask << 1;
  3325. rb0_mask &= rb_mask;
  3326. rb1_mask &= rb_mask;
  3327. if (!rb0_mask || !rb1_mask) {
  3328. raster_config_se &= ~RB_MAP_PKR1_MASK;
  3329. if (!rb0_mask) {
  3330. raster_config_se |=
  3331. RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
  3332. } else {
  3333. raster_config_se |=
  3334. RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
  3335. }
  3336. }
  3337. }
  3338. }
  3339. /* GRBM_GFX_INDEX has a different offset on VI */
  3340. gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
  3341. WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
  3342. WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
  3343. }
  3344. /* GRBM_GFX_INDEX has a different offset on VI */
  3345. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  3346. }
  3347. static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
  3348. {
  3349. int i, j;
  3350. u32 data;
  3351. u32 raster_config = 0, raster_config_1 = 0;
  3352. u32 active_rbs = 0;
  3353. u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
  3354. adev->gfx.config.max_sh_per_se;
  3355. unsigned num_rb_pipes;
  3356. mutex_lock(&adev->grbm_idx_mutex);
  3357. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  3358. for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
  3359. gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
  3360. data = gfx_v8_0_get_rb_active_bitmap(adev);
  3361. active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
  3362. rb_bitmap_width_per_sh);
  3363. }
  3364. }
  3365. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  3366. adev->gfx.config.backend_enable_mask = active_rbs;
  3367. adev->gfx.config.num_rbs = hweight32(active_rbs);
  3368. num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
  3369. adev->gfx.config.max_shader_engines, 16);
  3370. gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
  3371. if (!adev->gfx.config.backend_enable_mask ||
  3372. adev->gfx.config.num_rbs >= num_rb_pipes) {
  3373. WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
  3374. WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
  3375. } else {
  3376. gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
  3377. adev->gfx.config.backend_enable_mask,
  3378. num_rb_pipes);
  3379. }
  3380. /* cache the values for userspace */
  3381. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  3382. for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
  3383. gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
  3384. adev->gfx.config.rb_config[i][j].rb_backend_disable =
  3385. RREG32(mmCC_RB_BACKEND_DISABLE);
  3386. adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
  3387. RREG32(mmGC_USER_RB_BACKEND_DISABLE);
  3388. adev->gfx.config.rb_config[i][j].raster_config =
  3389. RREG32(mmPA_SC_RASTER_CONFIG);
  3390. adev->gfx.config.rb_config[i][j].raster_config_1 =
  3391. RREG32(mmPA_SC_RASTER_CONFIG_1);
  3392. }
  3393. }
  3394. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  3395. mutex_unlock(&adev->grbm_idx_mutex);
  3396. }
/**
 * gfx_v8_0_init_compute_vmid - initialize the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the SH_MEM_CONFIG and SH_MEM_BASES registers for the
 * compute VMIDs (FIRST_COMPUTE_VMID..LAST_COMPUTE_VMID - 1).
 */
  3405. #define DEFAULT_SH_MEM_BASES (0x6000)
  3406. #define FIRST_COMPUTE_VMID (8)
  3407. #define LAST_COMPUTE_VMID (16)
  3408. static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
  3409. {
  3410. int i;
  3411. uint32_t sh_mem_config;
  3412. uint32_t sh_mem_bases;
  3413. /*
  3414. * Configure apertures:
  3415. * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
  3416. * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
  3417. * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
  3418. */
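/*
 * DEFAULT_SH_MEM_BASES is 0x6000; with the 16-bit shared/private base
 * fields this presumably encodes address bits [63:48], i.e. the
 * 0x60000000'00000000 aperture base described above.
 */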
  3419. sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
  3420. sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
  3421. SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
  3422. SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
  3423. SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
  3424. MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
  3425. SH_MEM_CONFIG__PRIVATE_ATC_MASK;
  3426. mutex_lock(&adev->srbm_mutex);
  3427. for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
  3428. vi_srbm_select(adev, 0, 0, 0, i);
  3429. /* CP and shaders */
  3430. WREG32(mmSH_MEM_CONFIG, sh_mem_config);
  3431. WREG32(mmSH_MEM_APE1_BASE, 1);
  3432. WREG32(mmSH_MEM_APE1_LIMIT, 0);
  3433. WREG32(mmSH_MEM_BASES, sh_mem_bases);
  3434. }
  3435. vi_srbm_select(adev, 0, 0, 0, 0);
  3436. mutex_unlock(&adev->srbm_mutex);
  3437. }
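/*
 * One-time GFX block setup: program the GRBM read timeout and address
 * config registers, initialize the tiling tables, RB/CU configuration,
 * per-VMID SH_MEM registers (including the compute VMIDs) and the
 * PA_SC FIFO sizes.
 */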
  3438. static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
  3439. {
  3440. u32 tmp;
  3441. int i;
  3442. WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
  3443. WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
  3444. WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
  3445. WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
  3446. gfx_v8_0_tiling_mode_table_init(adev);
  3447. gfx_v8_0_setup_rb(adev);
  3448. gfx_v8_0_get_cu_info(adev);
  3449. /* XXX SH_MEM regs */
  3450. /* where to put LDS, scratch, GPUVM in FSA64 space */
  3451. mutex_lock(&adev->srbm_mutex);
  3452. for (i = 0; i < 16; i++) {
  3453. vi_srbm_select(adev, 0, 0, 0, i);
  3454. /* CP and shaders */
  3455. if (i == 0) {
  3456. tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
  3457. tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
  3458. tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
  3459. SH_MEM_ALIGNMENT_MODE_UNALIGNED);
  3460. WREG32(mmSH_MEM_CONFIG, tmp);
  3461. } else {
  3462. tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
  3463. tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
  3464. tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
  3465. SH_MEM_ALIGNMENT_MODE_UNALIGNED);
  3466. WREG32(mmSH_MEM_CONFIG, tmp);
  3467. }
  3468. WREG32(mmSH_MEM_APE1_BASE, 1);
  3469. WREG32(mmSH_MEM_APE1_LIMIT, 0);
  3470. WREG32(mmSH_MEM_BASES, 0);
  3471. }
  3472. vi_srbm_select(adev, 0, 0, 0, 0);
  3473. mutex_unlock(&adev->srbm_mutex);
  3474. gfx_v8_0_init_compute_vmid(adev);
  3475. mutex_lock(&adev->grbm_idx_mutex);
/*
 * make sure that the following register writes are broadcast
 * to all the shaders
 */
  3480. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  3481. WREG32(mmPA_SC_FIFO_SIZE,
  3482. (adev->gfx.config.sc_prim_fifo_size_frontend <<
  3483. PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
  3484. (adev->gfx.config.sc_prim_fifo_size_backend <<
  3485. PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
  3486. (adev->gfx.config.sc_hiz_tile_fifo_size <<
  3487. PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
  3488. (adev->gfx.config.sc_earlyz_tile_fifo_size <<
  3489. PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
  3490. mutex_unlock(&adev->grbm_idx_mutex);
  3491. }
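/*
 * Wait for the per-CU RLC SERDES masters on every SE/SH, and then the
 * non-CU masters, to report idle; each poll gives up after
 * adev->usec_timeout iterations.
 */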
  3492. static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
  3493. {
  3494. u32 i, j, k;
  3495. u32 mask;
  3496. mutex_lock(&adev->grbm_idx_mutex);
  3497. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  3498. for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
  3499. gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
  3500. for (k = 0; k < adev->usec_timeout; k++) {
  3501. if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
  3502. break;
  3503. udelay(1);
  3504. }
  3505. }
  3506. }
  3507. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  3508. mutex_unlock(&adev->grbm_idx_mutex);
  3509. mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
  3510. RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
  3511. RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
  3512. RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
  3513. for (k = 0; k < adev->usec_timeout; k++) {
  3514. if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
  3515. break;
  3516. udelay(1);
  3517. }
  3518. }
  3519. static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
  3520. bool enable)
  3521. {
  3522. u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
  3523. tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
  3524. tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
  3525. tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
  3526. tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
  3527. WREG32(mmCP_INT_CNTL_RING0, tmp);
  3528. }
  3529. static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
  3530. {
  3531. /* csib */
  3532. WREG32(mmRLC_CSIB_ADDR_HI,
  3533. adev->gfx.rlc.clear_state_gpu_addr >> 32);
  3534. WREG32(mmRLC_CSIB_ADDR_LO,
  3535. adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
  3536. WREG32(mmRLC_CSIB_LENGTH,
  3537. adev->gfx.rlc.clear_state_size);
  3538. }
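/*
 * Walk the RLC indirect register list (entries are terminated by
 * 0xFFFFFFFF): record the start offset of each entry, collect the unique
 * index registers that are referenced, and rewrite each reference in place
 * as an index into the unique_indices table.
 */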
  3539. static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
  3540. int ind_offset,
  3541. int list_size,
  3542. int *unique_indices,
  3543. int *indices_count,
  3544. int max_indices,
  3545. int *ind_start_offsets,
  3546. int *offset_count,
  3547. int max_offset)
  3548. {
  3549. int indices;
  3550. bool new_entry = true;
  3551. for (; ind_offset < list_size; ind_offset++) {
  3552. if (new_entry) {
  3553. new_entry = false;
  3554. ind_start_offsets[*offset_count] = ind_offset;
  3555. *offset_count = *offset_count + 1;
  3556. BUG_ON(*offset_count >= max_offset);
  3557. }
  3558. if (register_list_format[ind_offset] == 0xFFFFFFFF) {
  3559. new_entry = true;
  3560. continue;
  3561. }
  3562. ind_offset += 2;
/* look for the matching index */
  3564. for (indices = 0;
  3565. indices < *indices_count;
  3566. indices++) {
  3567. if (unique_indices[indices] ==
  3568. register_list_format[ind_offset])
  3569. break;
  3570. }
  3571. if (indices >= *indices_count) {
  3572. unique_indices[*indices_count] =
  3573. register_list_format[ind_offset];
  3574. indices = *indices_count;
  3575. *indices_count = *indices_count + 1;
  3576. BUG_ON(*indices_count >= max_indices);
  3577. }
  3578. register_list_format[ind_offset] = indices;
  3579. }
  3580. }
  3581. static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
  3582. {
  3583. int i, temp, data;
  3584. int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
  3585. int indices_count = 0;
  3586. int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  3587. int offset_count = 0;
  3588. int list_size;
  3589. unsigned int *register_list_format =
  3590. kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
  3591. if (register_list_format == NULL)
  3592. return -ENOMEM;
  3593. memcpy(register_list_format, adev->gfx.rlc.register_list_format,
  3594. adev->gfx.rlc.reg_list_format_size_bytes);
  3595. gfx_v8_0_parse_ind_reg_list(register_list_format,
  3596. RLC_FormatDirectRegListLength,
  3597. adev->gfx.rlc.reg_list_format_size_bytes >> 2,
  3598. unique_indices,
  3599. &indices_count,
  3600. sizeof(unique_indices) / sizeof(int),
  3601. indirect_start_offsets,
  3602. &offset_count,
  3603. sizeof(indirect_start_offsets)/sizeof(int));
  3604. /* save and restore list */
  3605. WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
  3606. WREG32(mmRLC_SRM_ARAM_ADDR, 0);
  3607. for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
  3608. WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
  3609. /* indirect list */
  3610. WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
  3611. for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
  3612. WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
  3613. list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
  3614. list_size = list_size >> 1;
  3615. WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
  3616. WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
/* write the starting offsets of the indirect list entries */
  3618. WREG32(mmRLC_GPM_SCRATCH_ADDR,
  3619. adev->gfx.rlc.starting_offsets_start);
  3620. for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
  3621. WREG32(mmRLC_GPM_SCRATCH_DATA,
  3622. indirect_start_offsets[i]);
  3623. /* unique indices */
  3624. temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
  3625. data = mmRLC_SRM_INDEX_CNTL_DATA_0;
  3626. for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
  3627. amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
  3628. amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
  3629. }
  3630. kfree(register_list_format);
  3631. return 0;
  3632. }
  3633. static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
  3634. {
  3635. WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
  3636. }
  3637. static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
  3638. {
  3639. uint32_t data;
  3640. if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
  3641. AMD_PG_SUPPORT_GFX_SMG |
  3642. AMD_PG_SUPPORT_GFX_DMG)) {
  3643. WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
  3644. data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
  3645. data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
  3646. data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
  3647. data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
  3648. WREG32(mmRLC_PG_DELAY, data);
  3649. WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
  3650. WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
  3651. }
  3652. }
  3653. static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
  3654. bool enable)
  3655. {
  3656. WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
  3657. }
  3658. static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
  3659. bool enable)
  3660. {
  3661. WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
  3662. }
  3663. static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
  3664. {
  3665. WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
  3666. }
  3667. static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
  3668. {
  3669. if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
  3670. AMD_PG_SUPPORT_GFX_SMG |
  3671. AMD_PG_SUPPORT_GFX_DMG |
  3672. AMD_PG_SUPPORT_CP |
  3673. AMD_PG_SUPPORT_GDS |
  3674. AMD_PG_SUPPORT_RLC_SMU_HS)) {
  3675. gfx_v8_0_init_csb(adev);
  3676. gfx_v8_0_init_save_restore_list(adev);
  3677. gfx_v8_0_enable_save_restore_machine(adev);
  3678. if ((adev->asic_type == CHIP_CARRIZO) ||
  3679. (adev->asic_type == CHIP_STONEY)) {
  3680. WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
  3681. gfx_v8_0_init_power_gating(adev);
  3682. WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
  3683. if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
  3684. cz_enable_sck_slow_down_on_power_up(adev, true);
  3685. cz_enable_sck_slow_down_on_power_down(adev, true);
  3686. } else {
  3687. cz_enable_sck_slow_down_on_power_up(adev, false);
  3688. cz_enable_sck_slow_down_on_power_down(adev, false);
  3689. }
  3690. if (adev->pg_flags & AMD_PG_SUPPORT_CP)
  3691. cz_enable_cp_power_gating(adev, true);
  3692. else
  3693. cz_enable_cp_power_gating(adev, false);
  3694. } else if (adev->asic_type == CHIP_POLARIS11) {
  3695. gfx_v8_0_init_power_gating(adev);
  3696. }
  3697. }
  3698. }
  3699. static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
  3700. {
  3701. WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
  3702. gfx_v8_0_enable_gui_idle_interrupt(adev, false);
  3703. gfx_v8_0_wait_for_rlc_serdes(adev);
  3704. }
  3705. static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
  3706. {
  3707. WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
  3708. udelay(50);
  3709. WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
  3710. udelay(50);
  3711. }
  3712. static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
  3713. {
  3714. WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
/* on carrizo (APU), the CP interrupt is enabled after the CP is initialized */
  3716. if (!(adev->flags & AMD_IS_APU))
  3717. gfx_v8_0_enable_gui_idle_interrupt(adev, true);
  3718. udelay(50);
  3719. }
  3720. static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
  3721. {
  3722. const struct rlc_firmware_header_v2_0 *hdr;
  3723. const __le32 *fw_data;
  3724. unsigned i, fw_size;
  3725. if (!adev->gfx.rlc_fw)
  3726. return -EINVAL;
  3727. hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
  3728. amdgpu_ucode_print_rlc_hdr(&hdr->header);
  3729. fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
  3730. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3731. fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
  3732. WREG32(mmRLC_GPM_UCODE_ADDR, 0);
  3733. for (i = 0; i < fw_size; i++)
  3734. WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
  3735. WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
  3736. return 0;
  3737. }
  3738. static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
  3739. {
  3740. int r;
  3741. u32 tmp;
  3742. gfx_v8_0_rlc_stop(adev);
  3743. /* disable CG */
  3744. tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
  3745. tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
  3746. RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
  3747. WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
  3748. if (adev->asic_type == CHIP_POLARIS11 ||
  3749. adev->asic_type == CHIP_POLARIS10) {
  3750. tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
  3751. tmp &= ~0x3;
  3752. WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
  3753. }
  3754. /* disable PG */
  3755. WREG32(mmRLC_PG_CNTL, 0);
  3756. gfx_v8_0_rlc_reset(adev);
  3757. gfx_v8_0_init_pg(adev);
  3758. if (!adev->pp_enabled) {
  3759. if (!adev->firmware.smu_load) {
  3760. /* legacy rlc firmware loading */
  3761. r = gfx_v8_0_rlc_load_microcode(adev);
  3762. if (r)
  3763. return r;
  3764. } else {
  3765. r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
  3766. AMDGPU_UCODE_ID_RLC_G);
  3767. if (r)
  3768. return -EINVAL;
  3769. }
  3770. }
  3771. gfx_v8_0_rlc_start(adev);
  3772. return 0;
  3773. }
  3774. static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
  3775. {
  3776. int i;
  3777. u32 tmp = RREG32(mmCP_ME_CNTL);
  3778. if (enable) {
  3779. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
  3780. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
  3781. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
  3782. } else {
  3783. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
  3784. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
  3785. tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
  3786. for (i = 0; i < adev->gfx.num_gfx_rings; i++)
  3787. adev->gfx.gfx_ring[i].ready = false;
  3788. }
  3789. WREG32(mmCP_ME_CNTL, tmp);
  3790. udelay(50);
  3791. }
  3792. static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
  3793. {
  3794. const struct gfx_firmware_header_v1_0 *pfp_hdr;
  3795. const struct gfx_firmware_header_v1_0 *ce_hdr;
  3796. const struct gfx_firmware_header_v1_0 *me_hdr;
  3797. const __le32 *fw_data;
  3798. unsigned i, fw_size;
  3799. if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
  3800. return -EINVAL;
  3801. pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
  3802. adev->gfx.pfp_fw->data;
  3803. ce_hdr = (const struct gfx_firmware_header_v1_0 *)
  3804. adev->gfx.ce_fw->data;
  3805. me_hdr = (const struct gfx_firmware_header_v1_0 *)
  3806. adev->gfx.me_fw->data;
  3807. amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
  3808. amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
  3809. amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
  3810. gfx_v8_0_cp_gfx_enable(adev, false);
  3811. /* PFP */
  3812. fw_data = (const __le32 *)
  3813. (adev->gfx.pfp_fw->data +
  3814. le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
  3815. fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
  3816. WREG32(mmCP_PFP_UCODE_ADDR, 0);
  3817. for (i = 0; i < fw_size; i++)
  3818. WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
  3819. WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
  3820. /* CE */
  3821. fw_data = (const __le32 *)
  3822. (adev->gfx.ce_fw->data +
  3823. le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
  3824. fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
  3825. WREG32(mmCP_CE_UCODE_ADDR, 0);
  3826. for (i = 0; i < fw_size; i++)
  3827. WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
  3828. WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
  3829. /* ME */
  3830. fw_data = (const __le32 *)
  3831. (adev->gfx.me_fw->data +
  3832. le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
  3833. fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
  3834. WREG32(mmCP_ME_RAM_WADDR, 0);
  3835. for (i = 0; i < fw_size; i++)
  3836. WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
  3837. WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
  3838. return 0;
  3839. }
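/*
 * Number of dwords in the clear-state PM4 stream emitted by
 * gfx_v8_0_cp_gfx_start(): preamble begin/end, context control, the
 * SECT_CONTEXT extents from vi_cs_data, the two raster config registers
 * and the final CLEAR_STATE packet.
 */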
  3840. static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
  3841. {
  3842. u32 count = 0;
  3843. const struct cs_section_def *sect = NULL;
  3844. const struct cs_extent_def *ext = NULL;
  3845. /* begin clear state */
  3846. count += 2;
  3847. /* context control state */
  3848. count += 3;
  3849. for (sect = vi_cs_data; sect->section != NULL; ++sect) {
  3850. for (ext = sect->section; ext->extent != NULL; ++ext) {
  3851. if (sect->id == SECT_CONTEXT)
  3852. count += 2 + ext->reg_count;
  3853. else
  3854. return 0;
  3855. }
  3856. }
  3857. /* pa_sc_raster_config/pa_sc_raster_config1 */
  3858. count += 4;
  3859. /* end clear state */
  3860. count += 2;
  3861. /* clear state */
  3862. count += 2;
  3863. return count;
  3864. }
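/*
 * Prime the gfx ring: program CP_MAX_CONTEXT, CP_ENDIAN_SWAP and
 * CP_DEVICE_ID, enable the CP and emit the clear-state preamble, the
 * per-ASIC raster config values and the CE partition bases.
 */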
  3865. static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
  3866. {
  3867. struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
  3868. const struct cs_section_def *sect = NULL;
  3869. const struct cs_extent_def *ext = NULL;
  3870. int r, i;
  3871. /* init the CP */
  3872. WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
  3873. WREG32(mmCP_ENDIAN_SWAP, 0);
  3874. WREG32(mmCP_DEVICE_ID, 1);
  3875. gfx_v8_0_cp_gfx_enable(adev, true);
  3876. r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
  3877. if (r) {
  3878. DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
  3879. return r;
  3880. }
  3881. /* clear state buffer */
  3882. amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3883. amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  3884. amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  3885. amdgpu_ring_write(ring, 0x80000000);
  3886. amdgpu_ring_write(ring, 0x80000000);
  3887. for (sect = vi_cs_data; sect->section != NULL; ++sect) {
  3888. for (ext = sect->section; ext->extent != NULL; ++ext) {
  3889. if (sect->id == SECT_CONTEXT) {
  3890. amdgpu_ring_write(ring,
  3891. PACKET3(PACKET3_SET_CONTEXT_REG,
  3892. ext->reg_count));
  3893. amdgpu_ring_write(ring,
  3894. ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
  3895. for (i = 0; i < ext->reg_count; i++)
  3896. amdgpu_ring_write(ring, ext->extent[i]);
  3897. }
  3898. }
  3899. }
  3900. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  3901. amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
  3902. switch (adev->asic_type) {
  3903. case CHIP_TONGA:
  3904. case CHIP_POLARIS10:
  3905. amdgpu_ring_write(ring, 0x16000012);
  3906. amdgpu_ring_write(ring, 0x0000002A);
  3907. break;
  3908. case CHIP_POLARIS11:
  3909. amdgpu_ring_write(ring, 0x16000012);
  3910. amdgpu_ring_write(ring, 0x00000000);
  3911. break;
  3912. case CHIP_FIJI:
  3913. amdgpu_ring_write(ring, 0x3a00161a);
  3914. amdgpu_ring_write(ring, 0x0000002e);
  3915. break;
  3916. case CHIP_CARRIZO:
  3917. amdgpu_ring_write(ring, 0x00000002);
  3918. amdgpu_ring_write(ring, 0x00000000);
  3919. break;
  3920. case CHIP_TOPAZ:
  3921. amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
  3922. 0x00000000 : 0x00000002);
  3923. amdgpu_ring_write(ring, 0x00000000);
  3924. break;
  3925. case CHIP_STONEY:
  3926. amdgpu_ring_write(ring, 0x00000000);
  3927. amdgpu_ring_write(ring, 0x00000000);
  3928. break;
  3929. default:
  3930. BUG();
  3931. }
  3932. amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3933. amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  3934. amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
  3935. amdgpu_ring_write(ring, 0);
  3936. /* init the CE partitions */
  3937. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
  3938. amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
  3939. amdgpu_ring_write(ring, 0x8000);
  3940. amdgpu_ring_write(ring, 0x8000);
  3941. amdgpu_ring_commit(ring);
  3942. return 0;
  3943. }
  3944. static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
  3945. {
  3946. struct amdgpu_ring *ring;
  3947. u32 tmp;
  3948. u32 rb_bufsz;
  3949. u64 rb_addr, rptr_addr, wptr_gpu_addr;
  3950. int r;
  3951. /* Set the write pointer delay */
  3952. WREG32(mmCP_RB_WPTR_DELAY, 0);
  3953. /* set the RB to use vmid 0 */
  3954. WREG32(mmCP_RB_VMID, 0);
  3955. /* Set ring buffer size */
  3956. ring = &adev->gfx.gfx_ring[0];
  3957. rb_bufsz = order_base_2(ring->ring_size / 8);
  3958. tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
  3959. tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
  3960. tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
  3961. tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
  3962. #ifdef __BIG_ENDIAN
  3963. tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
  3964. #endif
  3965. WREG32(mmCP_RB0_CNTL, tmp);
  3966. /* Initialize the ring buffer's read and write pointers */
  3967. WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
  3968. ring->wptr = 0;
  3969. WREG32(mmCP_RB0_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
  3971. rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
  3972. WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
  3973. WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
  3974. wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
  3975. WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
  3976. WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
  3977. mdelay(1);
  3978. WREG32(mmCP_RB0_CNTL, tmp);
  3979. rb_addr = ring->gpu_addr >> 8;
  3980. WREG32(mmCP_RB0_BASE, rb_addr);
  3981. WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
  3982. /* no gfx doorbells on iceland */
  3983. if (adev->asic_type != CHIP_TOPAZ) {
  3984. tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
  3985. if (ring->use_doorbell) {
  3986. tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
  3987. DOORBELL_OFFSET, ring->doorbell_index);
  3988. tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
  3989. DOORBELL_HIT, 0);
  3990. tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
  3991. DOORBELL_EN, 1);
  3992. } else {
  3993. tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
  3994. DOORBELL_EN, 0);
  3995. }
  3996. WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
  3997. if (adev->asic_type == CHIP_TONGA) {
  3998. tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
  3999. DOORBELL_RANGE_LOWER,
  4000. AMDGPU_DOORBELL_GFX_RING0);
  4001. WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
  4002. WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
  4003. CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
  4004. }
  4005. }
  4006. /* start the ring */
  4007. gfx_v8_0_cp_gfx_start(adev);
  4008. ring->ready = true;
  4009. r = amdgpu_ring_test_ring(ring);
  4010. if (r)
  4011. ring->ready = false;
  4012. return r;
  4013. }
  4014. static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
  4015. {
  4016. int i;
  4017. if (enable) {
  4018. WREG32(mmCP_MEC_CNTL, 0);
  4019. } else {
  4020. WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
  4021. for (i = 0; i < adev->gfx.num_compute_rings; i++)
  4022. adev->gfx.compute_ring[i].ready = false;
  4023. }
  4024. udelay(50);
  4025. }
  4026. static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
  4027. {
  4028. const struct gfx_firmware_header_v1_0 *mec_hdr;
  4029. const __le32 *fw_data;
  4030. unsigned i, fw_size;
  4031. if (!adev->gfx.mec_fw)
  4032. return -EINVAL;
  4033. gfx_v8_0_cp_compute_enable(adev, false);
  4034. mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  4035. amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
  4036. fw_data = (const __le32 *)
  4037. (adev->gfx.mec_fw->data +
  4038. le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
  4039. fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
  4040. /* MEC1 */
  4041. WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
  4042. for (i = 0; i < fw_size; i++)
  4043. WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
  4044. WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
  4045. /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
  4046. if (adev->gfx.mec2_fw) {
  4047. const struct gfx_firmware_header_v1_0 *mec2_hdr;
  4048. mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
  4049. amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
  4050. fw_data = (const __le32 *)
  4051. (adev->gfx.mec2_fw->data +
  4052. le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
  4053. fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
  4054. WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
  4055. for (i = 0; i < fw_size; i++)
  4056. WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
  4057. WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
  4058. }
  4059. return 0;
  4060. }
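/*
 * Memory queue descriptor (MQD) layout for VI compute queues.
 * gfx_v8_0_cp_compute_resume() allocates one per compute ring in GTT,
 * fills it in and mirrors the relevant fields into the CP_MQD and CP_HQD
 * registers; the ordinal comments give the dword position in the structure.
 */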
  4061. struct vi_mqd {
  4062. uint32_t header; /* ordinal0 */
  4063. uint32_t compute_dispatch_initiator; /* ordinal1 */
  4064. uint32_t compute_dim_x; /* ordinal2 */
  4065. uint32_t compute_dim_y; /* ordinal3 */
  4066. uint32_t compute_dim_z; /* ordinal4 */
  4067. uint32_t compute_start_x; /* ordinal5 */
  4068. uint32_t compute_start_y; /* ordinal6 */
  4069. uint32_t compute_start_z; /* ordinal7 */
  4070. uint32_t compute_num_thread_x; /* ordinal8 */
  4071. uint32_t compute_num_thread_y; /* ordinal9 */
  4072. uint32_t compute_num_thread_z; /* ordinal10 */
  4073. uint32_t compute_pipelinestat_enable; /* ordinal11 */
  4074. uint32_t compute_perfcount_enable; /* ordinal12 */
  4075. uint32_t compute_pgm_lo; /* ordinal13 */
  4076. uint32_t compute_pgm_hi; /* ordinal14 */
  4077. uint32_t compute_tba_lo; /* ordinal15 */
  4078. uint32_t compute_tba_hi; /* ordinal16 */
  4079. uint32_t compute_tma_lo; /* ordinal17 */
  4080. uint32_t compute_tma_hi; /* ordinal18 */
  4081. uint32_t compute_pgm_rsrc1; /* ordinal19 */
  4082. uint32_t compute_pgm_rsrc2; /* ordinal20 */
  4083. uint32_t compute_vmid; /* ordinal21 */
  4084. uint32_t compute_resource_limits; /* ordinal22 */
  4085. uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */
  4086. uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */
  4087. uint32_t compute_tmpring_size; /* ordinal25 */
  4088. uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */
  4089. uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */
  4090. uint32_t compute_restart_x; /* ordinal28 */
  4091. uint32_t compute_restart_y; /* ordinal29 */
  4092. uint32_t compute_restart_z; /* ordinal30 */
  4093. uint32_t compute_thread_trace_enable; /* ordinal31 */
  4094. uint32_t compute_misc_reserved; /* ordinal32 */
  4095. uint32_t compute_dispatch_id; /* ordinal33 */
  4096. uint32_t compute_threadgroup_id; /* ordinal34 */
  4097. uint32_t compute_relaunch; /* ordinal35 */
  4098. uint32_t compute_wave_restore_addr_lo; /* ordinal36 */
  4099. uint32_t compute_wave_restore_addr_hi; /* ordinal37 */
  4100. uint32_t compute_wave_restore_control; /* ordinal38 */
  4101. uint32_t reserved9; /* ordinal39 */
  4102. uint32_t reserved10; /* ordinal40 */
  4103. uint32_t reserved11; /* ordinal41 */
  4104. uint32_t reserved12; /* ordinal42 */
  4105. uint32_t reserved13; /* ordinal43 */
  4106. uint32_t reserved14; /* ordinal44 */
  4107. uint32_t reserved15; /* ordinal45 */
  4108. uint32_t reserved16; /* ordinal46 */
  4109. uint32_t reserved17; /* ordinal47 */
  4110. uint32_t reserved18; /* ordinal48 */
  4111. uint32_t reserved19; /* ordinal49 */
  4112. uint32_t reserved20; /* ordinal50 */
  4113. uint32_t reserved21; /* ordinal51 */
  4114. uint32_t reserved22; /* ordinal52 */
  4115. uint32_t reserved23; /* ordinal53 */
  4116. uint32_t reserved24; /* ordinal54 */
  4117. uint32_t reserved25; /* ordinal55 */
  4118. uint32_t reserved26; /* ordinal56 */
  4119. uint32_t reserved27; /* ordinal57 */
  4120. uint32_t reserved28; /* ordinal58 */
  4121. uint32_t reserved29; /* ordinal59 */
  4122. uint32_t reserved30; /* ordinal60 */
  4123. uint32_t reserved31; /* ordinal61 */
  4124. uint32_t reserved32; /* ordinal62 */
  4125. uint32_t reserved33; /* ordinal63 */
  4126. uint32_t reserved34; /* ordinal64 */
  4127. uint32_t compute_user_data_0; /* ordinal65 */
  4128. uint32_t compute_user_data_1; /* ordinal66 */
  4129. uint32_t compute_user_data_2; /* ordinal67 */
  4130. uint32_t compute_user_data_3; /* ordinal68 */
  4131. uint32_t compute_user_data_4; /* ordinal69 */
  4132. uint32_t compute_user_data_5; /* ordinal70 */
  4133. uint32_t compute_user_data_6; /* ordinal71 */
  4134. uint32_t compute_user_data_7; /* ordinal72 */
  4135. uint32_t compute_user_data_8; /* ordinal73 */
  4136. uint32_t compute_user_data_9; /* ordinal74 */
  4137. uint32_t compute_user_data_10; /* ordinal75 */
  4138. uint32_t compute_user_data_11; /* ordinal76 */
  4139. uint32_t compute_user_data_12; /* ordinal77 */
  4140. uint32_t compute_user_data_13; /* ordinal78 */
  4141. uint32_t compute_user_data_14; /* ordinal79 */
  4142. uint32_t compute_user_data_15; /* ordinal80 */
  4143. uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */
  4144. uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */
  4145. uint32_t reserved35; /* ordinal83 */
  4146. uint32_t reserved36; /* ordinal84 */
  4147. uint32_t reserved37; /* ordinal85 */
  4148. uint32_t cp_mqd_query_time_lo; /* ordinal86 */
  4149. uint32_t cp_mqd_query_time_hi; /* ordinal87 */
  4150. uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */
  4151. uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */
  4152. uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */
  4153. uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */
  4154. uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */
  4155. uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */
  4156. uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */
  4157. uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */
  4158. uint32_t reserved38; /* ordinal96 */
  4159. uint32_t reserved39; /* ordinal97 */
  4160. uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */
  4161. uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */
  4162. uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */
  4163. uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */
  4164. uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */
  4165. uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */
  4166. uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */
  4167. uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */
  4168. uint32_t reserved40; /* ordinal106 */
  4169. uint32_t reserved41; /* ordinal107 */
  4170. uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */
  4171. uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */
  4172. uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */
  4173. uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */
  4174. uint32_t reserved42; /* ordinal112 */
  4175. uint32_t reserved43; /* ordinal113 */
  4176. uint32_t cp_pq_exe_status_lo; /* ordinal114 */
  4177. uint32_t cp_pq_exe_status_hi; /* ordinal115 */
  4178. uint32_t cp_packet_id_lo; /* ordinal116 */
  4179. uint32_t cp_packet_id_hi; /* ordinal117 */
  4180. uint32_t cp_packet_exe_status_lo; /* ordinal118 */
  4181. uint32_t cp_packet_exe_status_hi; /* ordinal119 */
  4182. uint32_t gds_save_base_addr_lo; /* ordinal120 */
  4183. uint32_t gds_save_base_addr_hi; /* ordinal121 */
  4184. uint32_t gds_save_mask_lo; /* ordinal122 */
  4185. uint32_t gds_save_mask_hi; /* ordinal123 */
  4186. uint32_t ctx_save_base_addr_lo; /* ordinal124 */
  4187. uint32_t ctx_save_base_addr_hi; /* ordinal125 */
  4188. uint32_t reserved44; /* ordinal126 */
  4189. uint32_t reserved45; /* ordinal127 */
  4190. uint32_t cp_mqd_base_addr_lo; /* ordinal128 */
  4191. uint32_t cp_mqd_base_addr_hi; /* ordinal129 */
  4192. uint32_t cp_hqd_active; /* ordinal130 */
  4193. uint32_t cp_hqd_vmid; /* ordinal131 */
  4194. uint32_t cp_hqd_persistent_state; /* ordinal132 */
  4195. uint32_t cp_hqd_pipe_priority; /* ordinal133 */
  4196. uint32_t cp_hqd_queue_priority; /* ordinal134 */
  4197. uint32_t cp_hqd_quantum; /* ordinal135 */
  4198. uint32_t cp_hqd_pq_base_lo; /* ordinal136 */
  4199. uint32_t cp_hqd_pq_base_hi; /* ordinal137 */
  4200. uint32_t cp_hqd_pq_rptr; /* ordinal138 */
  4201. uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */
  4202. uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */
  4203. uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */
  4204. uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */
  4205. uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */
  4206. uint32_t cp_hqd_pq_wptr; /* ordinal144 */
  4207. uint32_t cp_hqd_pq_control; /* ordinal145 */
  4208. uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */
  4209. uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */
  4210. uint32_t cp_hqd_ib_rptr; /* ordinal148 */
  4211. uint32_t cp_hqd_ib_control; /* ordinal149 */
  4212. uint32_t cp_hqd_iq_timer; /* ordinal150 */
  4213. uint32_t cp_hqd_iq_rptr; /* ordinal151 */
  4214. uint32_t cp_hqd_dequeue_request; /* ordinal152 */
  4215. uint32_t cp_hqd_dma_offload; /* ordinal153 */
  4216. uint32_t cp_hqd_sema_cmd; /* ordinal154 */
  4217. uint32_t cp_hqd_msg_type; /* ordinal155 */
  4218. uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */
  4219. uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */
  4220. uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */
  4221. uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */
  4222. uint32_t cp_hqd_hq_status0; /* ordinal160 */
  4223. uint32_t cp_hqd_hq_control0; /* ordinal161 */
  4224. uint32_t cp_mqd_control; /* ordinal162 */
  4225. uint32_t cp_hqd_hq_status1; /* ordinal163 */
  4226. uint32_t cp_hqd_hq_control1; /* ordinal164 */
  4227. uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */
  4228. uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */
  4229. uint32_t cp_hqd_eop_control; /* ordinal167 */
  4230. uint32_t cp_hqd_eop_rptr; /* ordinal168 */
  4231. uint32_t cp_hqd_eop_wptr; /* ordinal169 */
  4232. uint32_t cp_hqd_eop_done_events; /* ordinal170 */
  4233. uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */
  4234. uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */
  4235. uint32_t cp_hqd_ctx_save_control; /* ordinal173 */
  4236. uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */
  4237. uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */
  4238. uint32_t cp_hqd_wg_state_offset; /* ordinal176 */
  4239. uint32_t cp_hqd_ctx_save_size; /* ordinal177 */
  4240. uint32_t cp_hqd_gds_resource_state; /* ordinal178 */
  4241. uint32_t cp_hqd_error; /* ordinal179 */
  4242. uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */
  4243. uint32_t cp_hqd_eop_dones; /* ordinal181 */
  4244. uint32_t reserved46; /* ordinal182 */
  4245. uint32_t reserved47; /* ordinal183 */
  4246. uint32_t reserved48; /* ordinal184 */
  4247. uint32_t reserved49; /* ordinal185 */
  4248. uint32_t reserved50; /* ordinal186 */
  4249. uint32_t reserved51; /* ordinal187 */
  4250. uint32_t reserved52; /* ordinal188 */
  4251. uint32_t reserved53; /* ordinal189 */
  4252. uint32_t reserved54; /* ordinal190 */
  4253. uint32_t reserved55; /* ordinal191 */
  4254. uint32_t iqtimer_pkt_header; /* ordinal192 */
  4255. uint32_t iqtimer_pkt_dw0; /* ordinal193 */
  4256. uint32_t iqtimer_pkt_dw1; /* ordinal194 */
  4257. uint32_t iqtimer_pkt_dw2; /* ordinal195 */
  4258. uint32_t iqtimer_pkt_dw3; /* ordinal196 */
  4259. uint32_t iqtimer_pkt_dw4; /* ordinal197 */
  4260. uint32_t iqtimer_pkt_dw5; /* ordinal198 */
  4261. uint32_t iqtimer_pkt_dw6; /* ordinal199 */
  4262. uint32_t iqtimer_pkt_dw7; /* ordinal200 */
  4263. uint32_t iqtimer_pkt_dw8; /* ordinal201 */
  4264. uint32_t iqtimer_pkt_dw9; /* ordinal202 */
  4265. uint32_t iqtimer_pkt_dw10; /* ordinal203 */
  4266. uint32_t iqtimer_pkt_dw11; /* ordinal204 */
  4267. uint32_t iqtimer_pkt_dw12; /* ordinal205 */
  4268. uint32_t iqtimer_pkt_dw13; /* ordinal206 */
  4269. uint32_t iqtimer_pkt_dw14; /* ordinal207 */
  4270. uint32_t iqtimer_pkt_dw15; /* ordinal208 */
  4271. uint32_t iqtimer_pkt_dw16; /* ordinal209 */
  4272. uint32_t iqtimer_pkt_dw17; /* ordinal210 */
  4273. uint32_t iqtimer_pkt_dw18; /* ordinal211 */
  4274. uint32_t iqtimer_pkt_dw19; /* ordinal212 */
  4275. uint32_t iqtimer_pkt_dw20; /* ordinal213 */
  4276. uint32_t iqtimer_pkt_dw21; /* ordinal214 */
  4277. uint32_t iqtimer_pkt_dw22; /* ordinal215 */
  4278. uint32_t iqtimer_pkt_dw23; /* ordinal216 */
  4279. uint32_t iqtimer_pkt_dw24; /* ordinal217 */
  4280. uint32_t iqtimer_pkt_dw25; /* ordinal218 */
  4281. uint32_t iqtimer_pkt_dw26; /* ordinal219 */
  4282. uint32_t iqtimer_pkt_dw27; /* ordinal220 */
  4283. uint32_t iqtimer_pkt_dw28; /* ordinal221 */
  4284. uint32_t iqtimer_pkt_dw29; /* ordinal222 */
  4285. uint32_t iqtimer_pkt_dw30; /* ordinal223 */
  4286. uint32_t iqtimer_pkt_dw31; /* ordinal224 */
  4287. uint32_t reserved56; /* ordinal225 */
  4288. uint32_t reserved57; /* ordinal226 */
  4289. uint32_t reserved58; /* ordinal227 */
  4290. uint32_t set_resources_header; /* ordinal228 */
  4291. uint32_t set_resources_dw1; /* ordinal229 */
  4292. uint32_t set_resources_dw2; /* ordinal230 */
  4293. uint32_t set_resources_dw3; /* ordinal231 */
  4294. uint32_t set_resources_dw4; /* ordinal232 */
  4295. uint32_t set_resources_dw5; /* ordinal233 */
  4296. uint32_t set_resources_dw6; /* ordinal234 */
  4297. uint32_t set_resources_dw7; /* ordinal235 */
  4298. uint32_t reserved59; /* ordinal236 */
  4299. uint32_t reserved60; /* ordinal237 */
  4300. uint32_t reserved61; /* ordinal238 */
  4301. uint32_t reserved62; /* ordinal239 */
  4302. uint32_t reserved63; /* ordinal240 */
  4303. uint32_t reserved64; /* ordinal241 */
  4304. uint32_t reserved65; /* ordinal242 */
  4305. uint32_t reserved66; /* ordinal243 */
  4306. uint32_t reserved67; /* ordinal244 */
  4307. uint32_t reserved68; /* ordinal245 */
  4308. uint32_t reserved69; /* ordinal246 */
  4309. uint32_t reserved70; /* ordinal247 */
  4310. uint32_t reserved71; /* ordinal248 */
  4311. uint32_t reserved72; /* ordinal249 */
  4312. uint32_t reserved73; /* ordinal250 */
  4313. uint32_t reserved74; /* ordinal251 */
  4314. uint32_t reserved75; /* ordinal252 */
  4315. uint32_t reserved76; /* ordinal253 */
  4316. uint32_t reserved77; /* ordinal254 */
  4317. uint32_t reserved78; /* ordinal255 */
  4318. uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */
  4319. };
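/* Unpin and free the per-ring compute MQD buffer objects. */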
  4320. static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev)
  4321. {
  4322. int i, r;
  4323. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4324. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  4325. if (ring->mqd_obj) {
  4326. r = amdgpu_bo_reserve(ring->mqd_obj, false);
  4327. if (unlikely(r != 0))
  4328. dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
  4329. amdgpu_bo_unpin(ring->mqd_obj);
  4330. amdgpu_bo_unreserve(ring->mqd_obj);
  4331. amdgpu_bo_unref(&ring->mqd_obj);
  4332. ring->mqd_obj = NULL;
  4333. }
  4334. }
  4335. }
  4336. static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
  4337. {
  4338. int r, i, j;
  4339. u32 tmp;
  4340. bool use_doorbell = true;
  4341. u64 hqd_gpu_addr;
  4342. u64 mqd_gpu_addr;
  4343. u64 eop_gpu_addr;
  4344. u64 wb_gpu_addr;
  4345. u32 *buf;
  4346. struct vi_mqd *mqd;
  4347. /* init the pipes */
  4348. mutex_lock(&adev->srbm_mutex);
  4349. for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
  4350. int me = (i < 4) ? 1 : 2;
  4351. int pipe = (i < 4) ? i : (i - 4);
  4352. eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
  4353. eop_gpu_addr >>= 8;
  4354. vi_srbm_select(adev, me, pipe, 0, 0);
  4355. /* write the EOP addr */
  4356. WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr);
  4357. WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
  4358. /* set the VMID assigned */
  4359. WREG32(mmCP_HQD_VMID, 0);
  4360. /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
  4361. tmp = RREG32(mmCP_HQD_EOP_CONTROL);
  4362. tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
  4363. (order_base_2(MEC_HPD_SIZE / 4) - 1));
  4364. WREG32(mmCP_HQD_EOP_CONTROL, tmp);
  4365. }
  4366. vi_srbm_select(adev, 0, 0, 0, 0);
  4367. mutex_unlock(&adev->srbm_mutex);
  4368. /* init the queues. Just two for now. */
  4369. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4370. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  4371. if (ring->mqd_obj == NULL) {
  4372. r = amdgpu_bo_create(adev,
  4373. sizeof(struct vi_mqd),
  4374. PAGE_SIZE, true,
  4375. AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
  4376. NULL, &ring->mqd_obj);
  4377. if (r) {
  4378. dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
  4379. return r;
  4380. }
  4381. }
  4382. r = amdgpu_bo_reserve(ring->mqd_obj, false);
  4383. if (unlikely(r != 0)) {
  4384. gfx_v8_0_cp_compute_fini(adev);
  4385. return r;
  4386. }
  4387. r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
  4388. &mqd_gpu_addr);
  4389. if (r) {
  4390. dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
  4391. gfx_v8_0_cp_compute_fini(adev);
  4392. return r;
  4393. }
  4394. r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
  4395. if (r) {
  4396. dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
  4397. gfx_v8_0_cp_compute_fini(adev);
  4398. return r;
  4399. }
  4400. /* init the mqd struct */
  4401. memset(buf, 0, sizeof(struct vi_mqd));
  4402. mqd = (struct vi_mqd *)buf;
  4403. mqd->header = 0xC0310800;
  4404. mqd->compute_pipelinestat_enable = 0x00000001;
  4405. mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
  4406. mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
  4407. mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
  4408. mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
  4409. mqd->compute_misc_reserved = 0x00000003;
  4410. mutex_lock(&adev->srbm_mutex);
  4411. vi_srbm_select(adev, ring->me,
  4412. ring->pipe,
  4413. ring->queue, 0);
  4414. /* disable wptr polling */
  4415. tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
  4416. tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
  4417. WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
  4418. mqd->cp_hqd_eop_base_addr_lo =
  4419. RREG32(mmCP_HQD_EOP_BASE_ADDR);
  4420. mqd->cp_hqd_eop_base_addr_hi =
  4421. RREG32(mmCP_HQD_EOP_BASE_ADDR_HI);
  4422. /* enable doorbell? */
  4423. tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
  4424. if (use_doorbell) {
  4425. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
  4426. } else {
  4427. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
  4428. }
  4429. WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
  4430. mqd->cp_hqd_pq_doorbell_control = tmp;
  4431. /* disable the queue if it's active */
  4432. mqd->cp_hqd_dequeue_request = 0;
  4433. mqd->cp_hqd_pq_rptr = 0;
4434. mqd->cp_hqd_pq_wptr = 0;
  4435. if (RREG32(mmCP_HQD_ACTIVE) & 1) {
  4436. WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
  4437. for (j = 0; j < adev->usec_timeout; j++) {
  4438. if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
  4439. break;
  4440. udelay(1);
  4441. }
  4442. WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
  4443. WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
  4444. WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
  4445. }
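/* Editor's note: writing 1 to CP_HQD_DEQUEUE_REQUEST is understood to ask the
 * CP to drain the queue (gfx_v8_0_inactive_hqd() below uses DEQUEUE_REQ = 2,
 * a reset-style dequeue); the loop then polls CP_HQD_ACTIVE for up to
 * adev->usec_timeout microseconds before clearing the request and the ring
 * pointers. */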
  4446. /* set the pointer to the MQD */
  4447. mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
  4448. mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
  4449. WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
  4450. WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
  4451. /* set MQD vmid to 0 */
  4452. tmp = RREG32(mmCP_MQD_CONTROL);
  4453. tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
  4454. WREG32(mmCP_MQD_CONTROL, tmp);
  4455. mqd->cp_mqd_control = tmp;
4456. /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
  4457. hqd_gpu_addr = ring->gpu_addr >> 8;
  4458. mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
  4459. mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
  4460. WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
  4461. WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
  4462. /* set up the HQD, this is similar to CP_RB0_CNTL */
  4463. tmp = RREG32(mmCP_HQD_PQ_CONTROL);
  4464. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
  4465. (order_base_2(ring->ring_size / 4) - 1));
  4466. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
  4467. ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
  4468. #ifdef __BIG_ENDIAN
  4469. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
  4470. #endif
  4471. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
  4472. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
  4473. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
  4474. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
  4475. WREG32(mmCP_HQD_PQ_CONTROL, tmp);
  4476. mqd->cp_hqd_pq_control = tmp;
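/* Editor's note (worked example): QUEUE_SIZE uses the same 2^(n+1)-dword
 * encoding as EOP_SIZE.  For a hypothetical 64 KB compute ring:
 *   ring->ring_size / 4     = 16384 dwords
 *   order_base_2(16384) - 1 = 13
 * and RPTR_BLOCK_SIZE is derived from the 4 KB AMDGPU_GPU_PAGE_SIZE:
 *   order_base_2(4096 / 4) - 1 = 9. */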
4477. /* set the wb address whether it's enabled or not */
  4478. wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
  4479. mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
  4480. mqd->cp_hqd_pq_rptr_report_addr_hi =
  4481. upper_32_bits(wb_gpu_addr) & 0xffff;
  4482. WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
  4483. mqd->cp_hqd_pq_rptr_report_addr_lo);
  4484. WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
  4485. mqd->cp_hqd_pq_rptr_report_addr_hi);
  4486. /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
  4487. wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
  4488. mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
  4489. mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
  4490. WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr);
  4491. WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
  4492. mqd->cp_hqd_pq_wptr_poll_addr_hi);
  4493. /* enable the doorbell if requested */
  4494. if (use_doorbell) {
  4495. if ((adev->asic_type == CHIP_CARRIZO) ||
  4496. (adev->asic_type == CHIP_FIJI) ||
  4497. (adev->asic_type == CHIP_STONEY) ||
  4498. (adev->asic_type == CHIP_POLARIS11) ||
  4499. (adev->asic_type == CHIP_POLARIS10)) {
  4500. WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
  4501. AMDGPU_DOORBELL_KIQ << 2);
  4502. WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
  4503. AMDGPU_DOORBELL_MEC_RING7 << 2);
  4504. }
  4505. tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
  4506. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
  4507. DOORBELL_OFFSET, ring->doorbell_index);
  4508. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
  4509. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
  4510. tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
  4511. mqd->cp_hqd_pq_doorbell_control = tmp;
  4512. } else {
  4513. mqd->cp_hqd_pq_doorbell_control = 0;
  4514. }
  4515. WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
  4516. mqd->cp_hqd_pq_doorbell_control);
  4517. /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
  4518. ring->wptr = 0;
  4519. mqd->cp_hqd_pq_wptr = ring->wptr;
  4520. WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
  4521. mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
  4522. /* set the vmid for the queue */
  4523. mqd->cp_hqd_vmid = 0;
  4524. WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
  4525. tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
  4526. tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
  4527. WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
  4528. mqd->cp_hqd_persistent_state = tmp;
  4529. if (adev->asic_type == CHIP_STONEY ||
  4530. adev->asic_type == CHIP_POLARIS11 ||
  4531. adev->asic_type == CHIP_POLARIS10) {
  4532. tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
  4533. tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
  4534. WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
  4535. }
  4536. /* activate the queue */
  4537. mqd->cp_hqd_active = 1;
  4538. WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
  4539. vi_srbm_select(adev, 0, 0, 0, 0);
  4540. mutex_unlock(&adev->srbm_mutex);
  4541. amdgpu_bo_kunmap(ring->mqd_obj);
  4542. amdgpu_bo_unreserve(ring->mqd_obj);
  4543. }
  4544. if (use_doorbell) {
  4545. tmp = RREG32(mmCP_PQ_STATUS);
  4546. tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
  4547. WREG32(mmCP_PQ_STATUS, tmp);
  4548. }
  4549. gfx_v8_0_cp_compute_enable(adev, true);
  4550. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4551. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  4552. ring->ready = true;
  4553. r = amdgpu_ring_test_ring(ring);
  4554. if (r)
  4555. ring->ready = false;
  4556. }
  4557. return 0;
  4558. }
  4559. static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
  4560. {
  4561. int r;
  4562. if (!(adev->flags & AMD_IS_APU))
  4563. gfx_v8_0_enable_gui_idle_interrupt(adev, false);
  4564. if (!adev->pp_enabled) {
  4565. if (!adev->firmware.smu_load) {
  4566. /* legacy firmware loading */
  4567. r = gfx_v8_0_cp_gfx_load_microcode(adev);
  4568. if (r)
  4569. return r;
  4570. r = gfx_v8_0_cp_compute_load_microcode(adev);
  4571. if (r)
  4572. return r;
  4573. } else {
  4574. r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
  4575. AMDGPU_UCODE_ID_CP_CE);
  4576. if (r)
  4577. return -EINVAL;
  4578. r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
  4579. AMDGPU_UCODE_ID_CP_PFP);
  4580. if (r)
  4581. return -EINVAL;
  4582. r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
  4583. AMDGPU_UCODE_ID_CP_ME);
  4584. if (r)
  4585. return -EINVAL;
  4586. if (adev->asic_type == CHIP_TOPAZ) {
  4587. r = gfx_v8_0_cp_compute_load_microcode(adev);
  4588. if (r)
  4589. return r;
  4590. } else {
  4591. r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
  4592. AMDGPU_UCODE_ID_CP_MEC1);
  4593. if (r)
  4594. return -EINVAL;
  4595. }
  4596. }
  4597. }
  4598. r = gfx_v8_0_cp_gfx_resume(adev);
  4599. if (r)
  4600. return r;
  4601. r = gfx_v8_0_cp_compute_resume(adev);
  4602. if (r)
  4603. return r;
  4604. gfx_v8_0_enable_gui_idle_interrupt(adev, true);
  4605. return 0;
  4606. }
  4607. static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
  4608. {
  4609. gfx_v8_0_cp_gfx_enable(adev, enable);
  4610. gfx_v8_0_cp_compute_enable(adev, enable);
  4611. }
  4612. static int gfx_v8_0_hw_init(void *handle)
  4613. {
  4614. int r;
  4615. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4616. gfx_v8_0_init_golden_registers(adev);
  4617. gfx_v8_0_gpu_init(adev);
  4618. r = gfx_v8_0_rlc_resume(adev);
  4619. if (r)
  4620. return r;
  4621. r = gfx_v8_0_cp_resume(adev);
  4622. return r;
  4623. }
  4624. static int gfx_v8_0_hw_fini(void *handle)
  4625. {
  4626. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4627. amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
  4628. amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
  4629. gfx_v8_0_cp_enable(adev, false);
  4630. gfx_v8_0_rlc_stop(adev);
  4631. gfx_v8_0_cp_compute_fini(adev);
  4632. amdgpu_set_powergating_state(adev,
  4633. AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
  4634. return 0;
  4635. }
  4636. static int gfx_v8_0_suspend(void *handle)
  4637. {
  4638. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4639. return gfx_v8_0_hw_fini(adev);
  4640. }
  4641. static int gfx_v8_0_resume(void *handle)
  4642. {
  4643. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4644. return gfx_v8_0_hw_init(adev);
  4645. }
  4646. static bool gfx_v8_0_is_idle(void *handle)
  4647. {
  4648. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4649. if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE))
  4650. return false;
  4651. else
  4652. return true;
  4653. }
  4654. static int gfx_v8_0_wait_for_idle(void *handle)
  4655. {
  4656. unsigned i;
  4657. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4658. for (i = 0; i < adev->usec_timeout; i++) {
  4659. if (gfx_v8_0_is_idle(handle))
  4660. return 0;
  4661. udelay(1);
  4662. }
  4663. return -ETIMEDOUT;
  4664. }
  4665. static bool gfx_v8_0_check_soft_reset(void *handle)
  4666. {
  4667. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4668. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4669. u32 tmp;
  4670. /* GRBM_STATUS */
  4671. tmp = RREG32(mmGRBM_STATUS);
  4672. if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
  4673. GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
  4674. GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
  4675. GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
  4676. GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
  4677. GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
  4678. GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
  4679. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
  4680. GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
  4681. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
  4682. GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
  4683. srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
  4684. SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
  4685. }
  4686. /* GRBM_STATUS2 */
  4687. tmp = RREG32(mmGRBM_STATUS2);
  4688. if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
  4689. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
  4690. GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
  4691. if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
  4692. REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
  4693. REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
  4694. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
  4695. SOFT_RESET_CPF, 1);
  4696. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
  4697. SOFT_RESET_CPC, 1);
  4698. grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
  4699. SOFT_RESET_CPG, 1);
  4700. srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
  4701. SOFT_RESET_GRBM, 1);
  4702. }
  4703. /* SRBM_STATUS */
  4704. tmp = RREG32(mmSRBM_STATUS);
  4705. if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
  4706. srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
  4707. SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
  4708. if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
  4709. srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
  4710. SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
  4711. if (grbm_soft_reset || srbm_soft_reset) {
  4712. adev->gfx.grbm_soft_reset = grbm_soft_reset;
  4713. adev->gfx.srbm_soft_reset = srbm_soft_reset;
  4714. return true;
  4715. } else {
  4716. adev->gfx.grbm_soft_reset = 0;
  4717. adev->gfx.srbm_soft_reset = 0;
  4718. return false;
  4719. }
  4720. }
  4721. static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
  4722. struct amdgpu_ring *ring)
  4723. {
  4724. int i;
  4725. vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
  4726. if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
  4727. u32 tmp;
  4728. tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
  4729. tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
  4730. DEQUEUE_REQ, 2);
  4731. WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
  4732. for (i = 0; i < adev->usec_timeout; i++) {
  4733. if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
  4734. break;
  4735. udelay(1);
  4736. }
  4737. }
  4738. }
  4739. static int gfx_v8_0_pre_soft_reset(void *handle)
  4740. {
  4741. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4742. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4743. if ((!adev->gfx.grbm_soft_reset) &&
  4744. (!adev->gfx.srbm_soft_reset))
  4745. return 0;
  4746. grbm_soft_reset = adev->gfx.grbm_soft_reset;
  4747. srbm_soft_reset = adev->gfx.srbm_soft_reset;
  4748. /* stop the rlc */
  4749. gfx_v8_0_rlc_stop(adev);
  4750. if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
  4751. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
  4752. /* Disable GFX parsing/prefetching */
  4753. gfx_v8_0_cp_gfx_enable(adev, false);
  4754. if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
  4755. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
  4756. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
  4757. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
  4758. int i;
  4759. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4760. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  4761. gfx_v8_0_inactive_hqd(adev, ring);
  4762. }
  4763. /* Disable MEC parsing/prefetching */
  4764. gfx_v8_0_cp_compute_enable(adev, false);
  4765. }
  4766. return 0;
  4767. }
  4768. static int gfx_v8_0_soft_reset(void *handle)
  4769. {
  4770. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4771. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4772. u32 tmp;
  4773. if ((!adev->gfx.grbm_soft_reset) &&
  4774. (!adev->gfx.srbm_soft_reset))
  4775. return 0;
  4776. grbm_soft_reset = adev->gfx.grbm_soft_reset;
  4777. srbm_soft_reset = adev->gfx.srbm_soft_reset;
  4778. if (grbm_soft_reset || srbm_soft_reset) {
  4779. tmp = RREG32(mmGMCON_DEBUG);
  4780. tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
  4781. tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
  4782. WREG32(mmGMCON_DEBUG, tmp);
  4783. udelay(50);
  4784. }
  4785. if (grbm_soft_reset) {
  4786. tmp = RREG32(mmGRBM_SOFT_RESET);
  4787. tmp |= grbm_soft_reset;
  4788. dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
  4789. WREG32(mmGRBM_SOFT_RESET, tmp);
  4790. tmp = RREG32(mmGRBM_SOFT_RESET);
  4791. udelay(50);
  4792. tmp &= ~grbm_soft_reset;
  4793. WREG32(mmGRBM_SOFT_RESET, tmp);
  4794. tmp = RREG32(mmGRBM_SOFT_RESET);
  4795. }
  4796. if (srbm_soft_reset) {
  4797. tmp = RREG32(mmSRBM_SOFT_RESET);
  4798. tmp |= srbm_soft_reset;
  4799. dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
  4800. WREG32(mmSRBM_SOFT_RESET, tmp);
  4801. tmp = RREG32(mmSRBM_SOFT_RESET);
  4802. udelay(50);
  4803. tmp &= ~srbm_soft_reset;
  4804. WREG32(mmSRBM_SOFT_RESET, tmp);
  4805. tmp = RREG32(mmSRBM_SOFT_RESET);
  4806. }
  4807. if (grbm_soft_reset || srbm_soft_reset) {
  4808. tmp = RREG32(mmGMCON_DEBUG);
  4809. tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
  4810. tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
  4811. WREG32(mmGMCON_DEBUG, tmp);
  4812. }
  4813. /* Wait a little for things to settle down */
  4814. udelay(50);
  4815. return 0;
  4816. }
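/* Editor's note: gfx_v8_0_soft_reset() above pulses each reset register the
 * same way -- OR in the reset bits, read back to post the write, wait ~50 us,
 * then clear the bits and read back again -- bracketed by GMCON_DEBUG
 * GFX_STALL/GFX_CLEAR, which appears to stall and flush the GFX memory path
 * while the blocks are in reset.  Below is a minimal sketch of that pulse as
 * a hypothetical helper (not part of the driver), relying only on this file's
 * RREG32/WREG32 macros and udelay(): */
static void gfx_v8_0_soft_reset_pulse(struct amdgpu_device *adev,
				      u32 reg, u32 bits)
{
	u32 tmp = RREG32(reg) | bits;	/* assert the requested reset bits */

	WREG32(reg, tmp);
	RREG32(reg);			/* post the write */
	udelay(50);
	WREG32(reg, tmp & ~bits);	/* release the reset bits */
	RREG32(reg);
}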
  4817. static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
  4818. struct amdgpu_ring *ring)
  4819. {
  4820. vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
  4821. WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
  4822. WREG32(mmCP_HQD_PQ_RPTR, 0);
  4823. WREG32(mmCP_HQD_PQ_WPTR, 0);
  4824. vi_srbm_select(adev, 0, 0, 0, 0);
  4825. }
  4826. static int gfx_v8_0_post_soft_reset(void *handle)
  4827. {
  4828. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4829. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4830. if ((!adev->gfx.grbm_soft_reset) &&
  4831. (!adev->gfx.srbm_soft_reset))
  4832. return 0;
  4833. grbm_soft_reset = adev->gfx.grbm_soft_reset;
  4834. srbm_soft_reset = adev->gfx.srbm_soft_reset;
  4835. if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
  4836. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
  4837. gfx_v8_0_cp_gfx_resume(adev);
  4838. if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
  4839. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
  4840. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
  4841. REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
  4842. int i;
  4843. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4844. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  4845. gfx_v8_0_init_hqd(adev, ring);
  4846. }
  4847. gfx_v8_0_cp_compute_resume(adev);
  4848. }
  4849. gfx_v8_0_rlc_start(adev);
  4850. return 0;
  4851. }
  4852. /**
  4853. * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
  4854. *
4855. * @adev: amdgpu_device pointer
4856. *
4857. * Fetches a GPU clock counter snapshot.
4858. * Returns the 64-bit clock counter snapshot.
  4859. */
  4860. static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
  4861. {
  4862. uint64_t clock;
  4863. mutex_lock(&adev->gfx.gpu_clock_mutex);
  4864. WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
  4865. clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
  4866. ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
  4867. mutex_unlock(&adev->gfx.gpu_clock_mutex);
  4868. return clock;
  4869. }
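/* Editor's note: the write to RLC_CAPTURE_GPU_CLOCK_COUNT is understood to
 * latch the free-running counter so the following LSB/MSB reads form a
 * coherent 64-bit snapshot; gpu_clock_mutex serializes concurrent callers
 * around the two-register read. */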
  4870. static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
  4871. uint32_t vmid,
  4872. uint32_t gds_base, uint32_t gds_size,
  4873. uint32_t gws_base, uint32_t gws_size,
  4874. uint32_t oa_base, uint32_t oa_size)
  4875. {
  4876. gds_base = gds_base >> AMDGPU_GDS_SHIFT;
  4877. gds_size = gds_size >> AMDGPU_GDS_SHIFT;
  4878. gws_base = gws_base >> AMDGPU_GWS_SHIFT;
  4879. gws_size = gws_size >> AMDGPU_GWS_SHIFT;
  4880. oa_base = oa_base >> AMDGPU_OA_SHIFT;
  4881. oa_size = oa_size >> AMDGPU_OA_SHIFT;
  4882. /* GDS Base */
  4883. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4884. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4885. WRITE_DATA_DST_SEL(0)));
  4886. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
  4887. amdgpu_ring_write(ring, 0);
  4888. amdgpu_ring_write(ring, gds_base);
  4889. /* GDS Size */
  4890. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4891. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4892. WRITE_DATA_DST_SEL(0)));
  4893. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
  4894. amdgpu_ring_write(ring, 0);
  4895. amdgpu_ring_write(ring, gds_size);
  4896. /* GWS */
  4897. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4898. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4899. WRITE_DATA_DST_SEL(0)));
  4900. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
  4901. amdgpu_ring_write(ring, 0);
  4902. amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
  4903. /* OA */
  4904. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4905. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4906. WRITE_DATA_DST_SEL(0)));
  4907. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
  4908. amdgpu_ring_write(ring, 0);
  4909. amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
  4910. }
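/* Editor's note: each WRITE_DATA burst above is five dwords -- header
 * (PACKET3_WRITE_DATA, count 3), a control dword selecting engine and
 * destination, the destination register offset, the upper destination
 * address (0 for register writes), and the payload.  The OA mask,
 * (1 << (oa_size + oa_base)) - (1 << oa_base), sets oa_size consecutive
 * bits starting at bit oa_base.  A minimal sketch of the repeated pattern
 * as a hypothetical helper (not part of the driver): */
static void gfx_v8_0_emit_write_data_reg(struct amdgpu_ring *ring,
					 u32 reg, u32 val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0));
	amdgpu_ring_write(ring, reg);	/* destination register offset */
	amdgpu_ring_write(ring, 0);	/* upper address, unused for mmio */
	amdgpu_ring_write(ring, val);	/* payload */
}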
  4911. static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
  4912. .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
  4913. .select_se_sh = &gfx_v8_0_select_se_sh,
  4914. };
  4915. static int gfx_v8_0_early_init(void *handle)
  4916. {
  4917. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4918. adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
  4919. adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
  4920. adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
  4921. gfx_v8_0_set_ring_funcs(adev);
  4922. gfx_v8_0_set_irq_funcs(adev);
  4923. gfx_v8_0_set_gds_init(adev);
  4924. gfx_v8_0_set_rlc_funcs(adev);
  4925. return 0;
  4926. }
  4927. static int gfx_v8_0_late_init(void *handle)
  4928. {
  4929. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4930. int r;
  4931. r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
  4932. if (r)
  4933. return r;
  4934. r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
  4935. if (r)
  4936. return r;
  4937. /* requires IBs so do in late init after IB pool is initialized */
  4938. r = gfx_v8_0_do_edc_gpr_workarounds(adev);
  4939. if (r)
  4940. return r;
  4941. amdgpu_set_powergating_state(adev,
  4942. AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
  4943. return 0;
  4944. }
  4945. static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
  4946. bool enable)
  4947. {
  4948. if (adev->asic_type == CHIP_POLARIS11)
  4949. /* Send msg to SMU via Powerplay */
  4950. amdgpu_set_powergating_state(adev,
  4951. AMD_IP_BLOCK_TYPE_SMC,
  4952. enable ?
  4953. AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
  4954. WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
  4955. }
  4956. static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
  4957. bool enable)
  4958. {
  4959. WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
  4960. }
  4961. static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
  4962. bool enable)
  4963. {
  4964. WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
  4965. }
  4966. static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
  4967. bool enable)
  4968. {
  4969. WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
  4970. }
  4971. static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
  4972. bool enable)
  4973. {
  4974. WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
  4975. /* Read any GFX register to wake up GFX. */
  4976. if (!enable)
  4977. RREG32(mmDB_RENDER_CONTROL);
  4978. }
  4979. static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
  4980. bool enable)
  4981. {
  4982. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
  4983. cz_enable_gfx_cg_power_gating(adev, true);
  4984. if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
  4985. cz_enable_gfx_pipeline_power_gating(adev, true);
  4986. } else {
  4987. cz_enable_gfx_cg_power_gating(adev, false);
  4988. cz_enable_gfx_pipeline_power_gating(adev, false);
  4989. }
  4990. }
  4991. static int gfx_v8_0_set_powergating_state(void *handle,
  4992. enum amd_powergating_state state)
  4993. {
  4994. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4995. bool enable = (state == AMD_PG_STATE_GATE);
  4996. if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
  4997. return 0;
  4998. switch (adev->asic_type) {
  4999. case CHIP_CARRIZO:
  5000. case CHIP_STONEY:
  5001. if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
  5002. cz_update_gfx_cg_power_gating(adev, enable);
  5003. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
  5004. gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
  5005. else
  5006. gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
  5007. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
  5008. gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
  5009. else
  5010. gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
  5011. break;
  5012. case CHIP_POLARIS11:
  5013. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
  5014. gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
  5015. else
  5016. gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
  5017. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
  5018. gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
  5019. else
  5020. gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
  5021. if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
  5022. polaris11_enable_gfx_quick_mg_power_gating(adev, true);
  5023. else
  5024. polaris11_enable_gfx_quick_mg_power_gating(adev, false);
  5025. break;
  5026. default:
  5027. break;
  5028. }
  5029. return 0;
  5030. }
  5031. static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
  5032. uint32_t reg_addr, uint32_t cmd)
  5033. {
  5034. uint32_t data;
  5035. gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  5036. WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5037. WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5038. data = RREG32(mmRLC_SERDES_WR_CTRL);
  5039. if (adev->asic_type == CHIP_STONEY)
  5040. data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
  5041. RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
  5042. RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
  5043. RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
  5044. RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
  5045. RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
  5046. RLC_SERDES_WR_CTRL__POWER_UP_MASK |
  5047. RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
  5048. RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
  5049. else
  5050. data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
  5051. RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
  5052. RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
  5053. RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
  5054. RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
  5055. RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
  5056. RLC_SERDES_WR_CTRL__POWER_UP_MASK |
  5057. RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
  5058. RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
  5059. RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
  5060. RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
  5061. data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
  5062. (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
  5063. (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
  5064. (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
  5065. WREG32(mmRLC_SERDES_WR_CTRL, data);
  5066. }
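/* Editor's note: gfx_v8_0_send_serdes_cmd() broadcasts a BPM command to the
 * whole chip -- SE/SH selection and the CU/non-CU master masks are opened to
 * 0xffffffff, the command goes in BPM_DATA, the target register in REG_ADDR,
 * and BPM_ADDR is set to 0xff, apparently to address every BPM instance.
 * Callers below pass SET_/CLE_BPM_SERDES_CMD to set or clear clock-gating
 * overrides. */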
  5067. #define MSG_ENTER_RLC_SAFE_MODE 1
  5068. #define MSG_EXIT_RLC_SAFE_MODE 0
  5069. #define RLC_GPR_REG2__REQ_MASK 0x00000001
  5070. #define RLC_GPR_REG2__REQ__SHIFT 0
  5071. #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
  5072. #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
  5073. static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
  5074. {
  5075. u32 data = 0;
  5076. unsigned i;
  5077. data = RREG32(mmRLC_CNTL);
  5078. if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
  5079. return;
  5080. if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
  5081. (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
  5082. AMD_PG_SUPPORT_GFX_DMG))) {
  5083. data |= RLC_GPR_REG2__REQ_MASK;
  5084. data &= ~RLC_GPR_REG2__MESSAGE_MASK;
  5085. data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
  5086. WREG32(mmRLC_GPR_REG2, data);
  5087. for (i = 0; i < adev->usec_timeout; i++) {
  5088. if ((RREG32(mmRLC_GPM_STAT) &
  5089. (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
  5090. RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
  5091. (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
  5092. RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
  5093. break;
  5094. udelay(1);
  5095. }
  5096. for (i = 0; i < adev->usec_timeout; i++) {
  5097. if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
  5098. break;
  5099. udelay(1);
  5100. }
  5101. adev->gfx.rlc.in_safe_mode = true;
  5102. }
  5103. }
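/* Editor's note: safe-mode entry is a handshake with the RLC firmware -- the
 * driver raises REQ together with the ENTER message in RLC_GPR_REG2, waits
 * for RLC_GPM_STAT to report GFX clocks and power up, then waits for the
 * firmware to clear REQ as the acknowledgement.  Both waits are bounded by
 * adev->usec_timeout and fall through silently on timeout; the exit path
 * below mirrors this with the EXIT message. */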
  5104. static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
  5105. {
  5106. u32 data;
  5107. unsigned i;
  5108. data = RREG32(mmRLC_CNTL);
  5109. if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
  5110. return;
  5111. if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
  5112. (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
  5113. AMD_PG_SUPPORT_GFX_DMG))) {
  5114. data |= RLC_GPR_REG2__REQ_MASK;
  5115. data &= ~RLC_GPR_REG2__MESSAGE_MASK;
  5116. data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
  5117. WREG32(mmRLC_GPR_REG2, data);
  5118. adev->gfx.rlc.in_safe_mode = false;
  5119. }
  5120. for (i = 0; i < adev->usec_timeout; i++) {
  5121. if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
  5122. break;
  5123. udelay(1);
  5124. }
  5125. }
  5126. static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
  5127. {
  5128. u32 data;
  5129. unsigned i;
  5130. data = RREG32(mmRLC_CNTL);
  5131. if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
  5132. return;
  5133. if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
  5134. data |= RLC_SAFE_MODE__CMD_MASK;
  5135. data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
  5136. data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
  5137. WREG32(mmRLC_SAFE_MODE, data);
  5138. for (i = 0; i < adev->usec_timeout; i++) {
  5139. if ((RREG32(mmRLC_GPM_STAT) &
  5140. (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
  5141. RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
  5142. (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
  5143. RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
  5144. break;
  5145. udelay(1);
  5146. }
  5147. for (i = 0; i < adev->usec_timeout; i++) {
  5148. if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
  5149. break;
  5150. udelay(1);
  5151. }
  5152. adev->gfx.rlc.in_safe_mode = true;
  5153. }
  5154. }
  5155. static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
  5156. {
  5157. u32 data = 0;
  5158. unsigned i;
  5159. data = RREG32(mmRLC_CNTL);
  5160. if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
  5161. return;
  5162. if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
  5163. if (adev->gfx.rlc.in_safe_mode) {
  5164. data |= RLC_SAFE_MODE__CMD_MASK;
  5165. data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
  5166. WREG32(mmRLC_SAFE_MODE, data);
  5167. adev->gfx.rlc.in_safe_mode = false;
  5168. }
  5169. }
  5170. for (i = 0; i < adev->usec_timeout; i++) {
  5171. if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
  5172. break;
  5173. udelay(1);
  5174. }
  5175. }
  5176. static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev)
  5177. {
  5178. adev->gfx.rlc.in_safe_mode = true;
  5179. }
  5180. static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev)
  5181. {
  5182. adev->gfx.rlc.in_safe_mode = false;
  5183. }
  5184. static const struct amdgpu_rlc_funcs cz_rlc_funcs = {
  5185. .enter_safe_mode = cz_enter_rlc_safe_mode,
  5186. .exit_safe_mode = cz_exit_rlc_safe_mode
  5187. };
  5188. static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
  5189. .enter_safe_mode = iceland_enter_rlc_safe_mode,
  5190. .exit_safe_mode = iceland_exit_rlc_safe_mode
  5191. };
  5192. static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = {
  5193. .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode,
  5194. .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode
  5195. };
  5196. static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
  5197. bool enable)
  5198. {
  5199. uint32_t temp, data;
  5200. adev->gfx.rlc.funcs->enter_safe_mode(adev);
  5201. /* It is disabled by HW by default */
  5202. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
  5203. if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
  5204. if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
  5205. /* 1 - RLC memory Light sleep */
  5206. WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
  5207. if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
  5208. WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
  5209. }
  5210. /* 3 - RLC_CGTT_MGCG_OVERRIDE */
  5211. temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  5212. if (adev->flags & AMD_IS_APU)
  5213. data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
  5214. RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
  5215. RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
  5216. else
  5217. data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
  5218. RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
  5219. RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
  5220. RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
  5221. if (temp != data)
  5222. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
  5223. /* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5224. gfx_v8_0_wait_for_rlc_serdes(adev);
  5225. /* 5 - clear mgcg override */
  5226. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
  5227. if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
5228. /* 6 - Enable CGTS(Tree Shade) MGCG/MGLS */
  5229. temp = data = RREG32(mmCGTS_SM_CTRL_REG);
  5230. data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
  5231. data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
  5232. data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
  5233. data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
  5234. if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
  5235. (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
  5236. data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
  5237. data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
  5238. data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
  5239. if (temp != data)
  5240. WREG32(mmCGTS_SM_CTRL_REG, data);
  5241. }
  5242. udelay(50);
  5243. /* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5244. gfx_v8_0_wait_for_rlc_serdes(adev);
  5245. } else {
  5246. /* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
  5247. temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  5248. data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
  5249. RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
  5250. RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
  5251. RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
  5252. if (temp != data)
  5253. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
  5254. /* 2 - disable MGLS in RLC */
  5255. data = RREG32(mmRLC_MEM_SLP_CNTL);
  5256. if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
  5257. data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
  5258. WREG32(mmRLC_MEM_SLP_CNTL, data);
  5259. }
  5260. /* 3 - disable MGLS in CP */
  5261. data = RREG32(mmCP_MEM_SLP_CNTL);
  5262. if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
  5263. data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
  5264. WREG32(mmCP_MEM_SLP_CNTL, data);
  5265. }
  5266. /* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
  5267. temp = data = RREG32(mmCGTS_SM_CTRL_REG);
  5268. data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
  5269. CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
  5270. if (temp != data)
  5271. WREG32(mmCGTS_SM_CTRL_REG, data);
  5272. /* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5273. gfx_v8_0_wait_for_rlc_serdes(adev);
  5274. /* 6 - set mgcg override */
  5275. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
  5276. udelay(50);
  5277. /* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5278. gfx_v8_0_wait_for_rlc_serdes(adev);
  5279. }
  5280. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  5281. }
  5282. static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
  5283. bool enable)
  5284. {
  5285. uint32_t temp, temp1, data, data1;
  5286. temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
  5287. adev->gfx.rlc.funcs->enter_safe_mode(adev);
  5288. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
  5289. /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
  5290. * Cmp_busy/GFX_Idle interrupts
  5291. */
  5292. gfx_v8_0_enable_gui_idle_interrupt(adev, true);
  5293. temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  5294. data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
  5295. if (temp1 != data1)
  5296. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
  5297. /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5298. gfx_v8_0_wait_for_rlc_serdes(adev);
  5299. /* 3 - clear cgcg override */
  5300. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
  5301. /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5302. gfx_v8_0_wait_for_rlc_serdes(adev);
  5303. /* 4 - write cmd to set CGLS */
  5304. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
  5305. /* 5 - enable cgcg */
  5306. data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
  5307. if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5308. /* enable cgls */
  5309. data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
  5310. temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  5311. data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
  5312. if (temp1 != data1)
  5313. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
  5314. } else {
  5315. data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
  5316. }
  5317. if (temp != data)
  5318. WREG32(mmRLC_CGCG_CGLS_CTRL, data);
  5319. } else {
  5320. /* disable cntx_empty_int_enable & GFX Idle interrupt */
  5321. gfx_v8_0_enable_gui_idle_interrupt(adev, false);
  5322. /* TEST CGCG */
  5323. temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  5324. data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
  5325. RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
  5326. if (temp1 != data1)
  5327. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
  5328. /* read gfx register to wake up cgcg */
  5329. RREG32(mmCB_CGTT_SCLK_CTRL);
  5330. RREG32(mmCB_CGTT_SCLK_CTRL);
  5331. RREG32(mmCB_CGTT_SCLK_CTRL);
  5332. RREG32(mmCB_CGTT_SCLK_CTRL);
  5333. /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5334. gfx_v8_0_wait_for_rlc_serdes(adev);
5335. /* write cmd to Set CGCG Override */
  5336. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
  5337. /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
  5338. gfx_v8_0_wait_for_rlc_serdes(adev);
  5339. /* write cmd to Clear CGLS */
  5340. gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
  5341. /* disable cgcg, cgls should be disabled too. */
  5342. data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
  5343. RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
  5344. if (temp != data)
  5345. WREG32(mmRLC_CGCG_CGLS_CTRL, data);
  5346. }
  5347. gfx_v8_0_wait_for_rlc_serdes(adev);
  5348. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  5349. }
  5350. static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
  5351. bool enable)
  5352. {
  5353. if (enable) {
  5354. /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
  5355. * === MGCG + MGLS + TS(CG/LS) ===
  5356. */
  5357. gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
  5358. gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
  5359. } else {
  5360. /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
  5361. * === CGCG + CGLS ===
  5362. */
  5363. gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
  5364. gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
  5365. }
  5366. return 0;
  5367. }
  5368. static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
  5369. enum amd_clockgating_state state)
  5370. {
  5371. uint32_t msg_id, pp_state;
  5372. void *pp_handle = adev->powerplay.pp_handle;
  5373. if (state == AMD_CG_STATE_UNGATE)
  5374. pp_state = 0;
  5375. else
  5376. pp_state = PP_STATE_CG | PP_STATE_LS;
  5377. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5378. PP_BLOCK_GFX_CG,
  5379. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5380. pp_state);
  5381. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5382. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5383. PP_BLOCK_GFX_MG,
  5384. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5385. pp_state);
  5386. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5387. return 0;
  5388. }
  5389. static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
  5390. enum amd_clockgating_state state)
  5391. {
  5392. uint32_t msg_id, pp_state;
  5393. void *pp_handle = adev->powerplay.pp_handle;
  5394. if (state == AMD_CG_STATE_UNGATE)
  5395. pp_state = 0;
  5396. else
  5397. pp_state = PP_STATE_CG | PP_STATE_LS;
  5398. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5399. PP_BLOCK_GFX_CG,
  5400. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5401. pp_state);
  5402. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5403. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5404. PP_BLOCK_GFX_3D,
  5405. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5406. pp_state);
  5407. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5408. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5409. PP_BLOCK_GFX_MG,
  5410. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5411. pp_state);
  5412. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5413. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5414. PP_BLOCK_GFX_RLC,
  5415. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5416. pp_state);
  5417. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5418. msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
  5419. PP_BLOCK_GFX_CP,
  5420. PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
  5421. pp_state);
  5422. amd_set_clockgating_by_smu(pp_handle, msg_id);
  5423. return 0;
  5424. }
  5425. static int gfx_v8_0_set_clockgating_state(void *handle,
  5426. enum amd_clockgating_state state)
  5427. {
  5428. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  5429. switch (adev->asic_type) {
  5430. case CHIP_FIJI:
  5431. case CHIP_CARRIZO:
  5432. case CHIP_STONEY:
5433. gfx_v8_0_update_gfx_clock_gating(adev,
5434. state == AMD_CG_STATE_GATE);
  5435. break;
  5436. case CHIP_TONGA:
  5437. gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
  5438. break;
  5439. case CHIP_POLARIS10:
  5440. case CHIP_POLARIS11:
  5441. gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
  5442. break;
  5443. default:
  5444. break;
  5445. }
  5446. return 0;
  5447. }
  5448. static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
  5449. {
  5450. return ring->adev->wb.wb[ring->rptr_offs];
  5451. }
  5452. static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
  5453. {
  5454. struct amdgpu_device *adev = ring->adev;
  5455. if (ring->use_doorbell)
  5456. /* XXX check if swapping is necessary on BE */
  5457. return ring->adev->wb.wb[ring->wptr_offs];
  5458. else
  5459. return RREG32(mmCP_RB0_WPTR);
  5460. }
  5461. static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
  5462. {
  5463. struct amdgpu_device *adev = ring->adev;
  5464. if (ring->use_doorbell) {
  5465. /* XXX check if swapping is necessary on BE */
  5466. adev->wb.wb[ring->wptr_offs] = ring->wptr;
  5467. WDOORBELL32(ring->doorbell_index, ring->wptr);
  5468. } else {
  5469. WREG32(mmCP_RB0_WPTR, ring->wptr);
  5470. (void)RREG32(mmCP_RB0_WPTR);
  5471. }
  5472. }
  5473. static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  5474. {
  5475. u32 ref_and_mask, reg_mem_engine;
  5476. if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
  5477. switch (ring->me) {
  5478. case 1:
  5479. ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
  5480. break;
  5481. case 2:
  5482. ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
  5483. break;
  5484. default:
  5485. return;
  5486. }
  5487. reg_mem_engine = 0;
  5488. } else {
  5489. ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
  5490. reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
  5491. }
  5492. amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  5493. amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
  5494. WAIT_REG_MEM_FUNCTION(3) | /* == */
  5495. reg_mem_engine));
  5496. amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
  5497. amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
  5498. amdgpu_ring_write(ring, ref_and_mask);
  5499. amdgpu_ring_write(ring, ref_and_mask);
  5500. amdgpu_ring_write(ring, 0x20); /* poll interval */
  5501. }
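/* Editor's note: the HDP flush is a single WAIT_REG_MEM packet with
 * OPERATION(1) ("write, wait, write"): the CP writes ref_and_mask to
 * GPU_HDP_FLUSH_REQ, then polls GPU_HDP_FLUSH_DONE under the same mask with
 * a poll interval of 0x20 until the done bit for this ring's CP client is
 * set. */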
  5502. static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
  5503. {
  5504. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5505. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  5506. WRITE_DATA_DST_SEL(0) |
  5507. WR_CONFIRM));
  5508. amdgpu_ring_write(ring, mmHDP_DEBUG0);
  5509. amdgpu_ring_write(ring, 0);
  5510. amdgpu_ring_write(ring, 1);
  5511. }
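/* Editor's note: HDP read-cache invalidation is done here by writing 1 to
 * HDP_DEBUG0 through WRITE_DATA with WR_CONFIRM -- a conventional trick on
 * these parts rather than a dedicated invalidate packet. */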
  5512. static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
  5513. struct amdgpu_ib *ib,
  5514. unsigned vm_id, bool ctx_switch)
  5515. {
  5516. u32 header, control = 0;
  5517. if (ib->flags & AMDGPU_IB_FLAG_CE)
  5518. header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
  5519. else
  5520. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  5521. control |= ib->length_dw | (vm_id << 24);
  5522. amdgpu_ring_write(ring, header);
  5523. amdgpu_ring_write(ring,
  5524. #ifdef __BIG_ENDIAN
  5525. (2 << 0) |
  5526. #endif
  5527. (ib->gpu_addr & 0xFFFFFFFC));
  5528. amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  5529. amdgpu_ring_write(ring, control);
  5530. }
  5531. static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
  5532. struct amdgpu_ib *ib,
  5533. unsigned vm_id, bool ctx_switch)
  5534. {
  5535. u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
  5536. amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
  5537. amdgpu_ring_write(ring,
  5538. #ifdef __BIG_ENDIAN
  5539. (2 << 0) |
  5540. #endif
  5541. (ib->gpu_addr & 0xFFFFFFFC));
  5542. amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  5543. amdgpu_ring_write(ring, control);
  5544. }
  5545. static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  5546. u64 seq, unsigned flags)
  5547. {
  5548. bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
  5549. bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
  5550. /* EVENT_WRITE_EOP - flush caches, send int */
  5551. amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  5552. amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
  5553. EOP_TC_ACTION_EN |
  5554. EOP_TC_WB_ACTION_EN |
  5555. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  5556. EVENT_INDEX(5)));
  5557. amdgpu_ring_write(ring, addr & 0xfffffffc);
  5558. amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
  5559. DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
  5560. amdgpu_ring_write(ring, lower_32_bits(seq));
  5561. amdgpu_ring_write(ring, upper_32_bits(seq));
  5562. }
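/* Editor's note: in the EVENT_WRITE_EOP above, DATA_SEL selects what the CP
 * writes back at 'addr' (1 = lower 32 bits of seq, 2 = the full 64-bit seq)
 * and INT_SEL(2) requests an interrupt once the data has been written, which
 * is how fence completion reaches gfx_v8_0_eop_irq() below. */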
  5563. static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
  5564. {
  5565. int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
  5566. uint32_t seq = ring->fence_drv.sync_seq;
  5567. uint64_t addr = ring->fence_drv.gpu_addr;
  5568. amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  5569. amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
  5570. WAIT_REG_MEM_FUNCTION(3) | /* equal */
  5571. WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
  5572. amdgpu_ring_write(ring, addr & 0xfffffffc);
  5573. amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
  5574. amdgpu_ring_write(ring, seq);
  5575. amdgpu_ring_write(ring, 0xffffffff);
  5576. amdgpu_ring_write(ring, 4); /* poll interval */
  5577. }
  5578. static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
  5579. unsigned vm_id, uint64_t pd_addr)
  5580. {
  5581. int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
  5582. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5583. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5584. WRITE_DATA_DST_SEL(0)) |
  5585. WR_CONFIRM);
  5586. if (vm_id < 8) {
  5587. amdgpu_ring_write(ring,
  5588. (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
  5589. } else {
  5590. amdgpu_ring_write(ring,
  5591. (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
  5592. }
  5593. amdgpu_ring_write(ring, 0);
  5594. amdgpu_ring_write(ring, pd_addr >> 12);
  5595. /* bits 0-15 are the VM contexts0-15 */
  5596. /* invalidate the cache */
  5597. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5598. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  5599. WRITE_DATA_DST_SEL(0)));
  5600. amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
  5601. amdgpu_ring_write(ring, 0);
  5602. amdgpu_ring_write(ring, 1 << vm_id);
  5603. /* wait for the invalidate to complete */
  5604. amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  5605. amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
  5606. WAIT_REG_MEM_FUNCTION(0) | /* always */
  5607. WAIT_REG_MEM_ENGINE(0))); /* me */
  5608. amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
  5609. amdgpu_ring_write(ring, 0);
  5610. amdgpu_ring_write(ring, 0); /* ref */
  5611. amdgpu_ring_write(ring, 0); /* mask */
  5612. amdgpu_ring_write(ring, 0x20); /* poll interval */
  5613. /* compute doesn't have PFP */
  5614. if (usepfp) {
  5615. /* sync PFP to ME, otherwise we might get invalid PFP reads */
  5616. amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  5617. amdgpu_ring_write(ring, 0x0);
  5618. /* GFX8 emits 128 dw nop to prevent CE access VM before vm_flush finish */
  5619. amdgpu_ring_insert_nop(ring, 128);
  5620. }
  5621. }
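/* Editor's note: the VM flush above is three steps -- write the page
 * directory base for this vm_id (contexts 0-7 and 8-15 sit in separate
 * register banks, hence the split), write the per-VMID bit to
 * VM_INVALIDATE_REQUEST, then issue a WAIT_REG_MEM on the same register
 * (function "always", so effectively an ordering/settling wait).  On the gfx
 * ring, PFP_SYNC_ME plus the 128-dword NOP keeps the CE from touching the VM
 * before the flush has landed. */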
  5622. static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
  5623. {
  5624. return ring->adev->wb.wb[ring->wptr_offs];
  5625. }
  5626. static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
  5627. {
  5628. struct amdgpu_device *adev = ring->adev;
  5629. /* XXX check if swapping is necessary on BE */
  5630. adev->wb.wb[ring->wptr_offs] = ring->wptr;
  5631. WDOORBELL32(ring->doorbell_index, ring->wptr);
  5632. }
  5633. static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  5634. u64 addr, u64 seq,
  5635. unsigned flags)
  5636. {
  5637. bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
  5638. bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
  5639. /* RELEASE_MEM - flush caches, send int */
  5640. amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
  5641. amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
  5642. EOP_TC_ACTION_EN |
  5643. EOP_TC_WB_ACTION_EN |
  5644. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  5645. EVENT_INDEX(5)));
  5646. amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
  5647. amdgpu_ring_write(ring, addr & 0xfffffffc);
  5648. amdgpu_ring_write(ring, upper_32_bits(addr));
  5649. amdgpu_ring_write(ring, lower_32_bits(seq));
  5650. amdgpu_ring_write(ring, upper_32_bits(seq));
  5651. }
  5652. static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
  5653. {
  5654. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  5655. amdgpu_ring_write(ring, 0);
  5656. }
  5657. static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
  5658. {
  5659. uint32_t dw2 = 0;
5660. dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
  5661. if (flags & AMDGPU_HAVE_CTX_SWITCH) {
  5662. /* set load_global_config & load_global_uconfig */
  5663. dw2 |= 0x8001;
  5664. /* set load_cs_sh_regs */
  5665. dw2 |= 0x01000000;
  5666. /* set load_per_context_state & load_gfx_sh_regs for GFX */
  5667. dw2 |= 0x10002;
  5668. /* set load_ce_ram if preamble presented */
  5669. if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
  5670. dw2 |= 0x10000000;
  5671. } else {
5672. /* still load_ce_ram if this is the first time a preamble is presented,
5673. * even though no context switch happens.
  5674. */
  5675. if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
  5676. dw2 |= 0x10000000;
  5677. }
  5678. amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  5679. amdgpu_ring_write(ring, dw2);
  5680. amdgpu_ring_write(ring, 0);
  5681. }
  5682. static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
  5683. {
  5684. return
  5685. 4; /* gfx_v8_0_ring_emit_ib_gfx */
  5686. }
  5687. static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
  5688. {
  5689. return
  5690. 20 + /* gfx_v8_0_ring_emit_gds_switch */
  5691. 7 + /* gfx_v8_0_ring_emit_hdp_flush */
  5692. 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
5693. 6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
  5694. 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
  5695. 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
  5696. 2 + /* gfx_v8_ring_emit_sb */
  5697. 3; /* gfx_v8_ring_emit_cntxcntl */
  5698. }
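/* Editor's note (worked sum): 20 + 7 + 5 + (6 + 6 + 6) + 7 + (128 + 19) +
 * 2 + 3 = 209 dwords reserved per gfx frame, dominated by the 128-dword NOP
 * padding emitted after a VM flush. */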
  5699. static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
  5700. {
  5701. return
  5702. 4; /* gfx_v8_0_ring_emit_ib_compute */
  5703. }
  5704. static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
  5705. {
  5706. return
  5707. 20 + /* gfx_v8_0_ring_emit_gds_switch */
  5708. 7 + /* gfx_v8_0_ring_emit_hdp_flush */
  5709. 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
  5710. 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
  5711. 17 + /* gfx_v8_0_ring_emit_vm_flush */
  5712. 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
  5713. }
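/* Editor's note (worked sum): 20 + 7 + 5 + 7 + 17 + (7 + 7 + 7) = 77 dwords
 * per compute frame; compute rings need no switch-buffer, cntxcntl or
 * 128-dword CE padding, hence the smaller reservation. */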
  5714. static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
  5715. enum amdgpu_interrupt_state state)
  5716. {
  5717. WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
  5718. state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
  5719. }
  5720. static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
  5721. int me, int pipe,
  5722. enum amdgpu_interrupt_state state)
  5723. {
  5724. /*
  5725. * amdgpu controls only pipe 0 of MEC1. That's why this function only
  5726. * handles the setting of interrupts for this specific pipe. All other
  5727. * pipes' interrupts are set by amdkfd.
  5728. */
  5729. if (me == 1) {
  5730. switch (pipe) {
  5731. case 0:
  5732. break;
  5733. default:
  5734. DRM_DEBUG("invalid pipe %d\n", pipe);
  5735. return;
  5736. }
  5737. } else {
  5738. DRM_DEBUG("invalid me %d\n", me);
  5739. return;
  5740. }
  5741. WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
  5742. state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
  5743. }
  5744. static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
  5745. struct amdgpu_irq_src *source,
  5746. unsigned type,
  5747. enum amdgpu_interrupt_state state)
  5748. {
  5749. WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
  5750. state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
  5751. return 0;
  5752. }
  5753. static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
  5754. struct amdgpu_irq_src *source,
  5755. unsigned type,
  5756. enum amdgpu_interrupt_state state)
  5757. {
  5758. WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
  5759. state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
  5760. return 0;
  5761. }
  5762. static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
  5763. struct amdgpu_irq_src *src,
  5764. unsigned type,
  5765. enum amdgpu_interrupt_state state)
  5766. {
  5767. switch (type) {
  5768. case AMDGPU_CP_IRQ_GFX_EOP:
  5769. gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
  5770. break;
  5771. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
  5772. gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
  5773. break;
  5774. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
  5775. gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
  5776. break;
  5777. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
  5778. gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
  5779. break;
  5780. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
  5781. gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
  5782. break;
  5783. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
  5784. gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
  5785. break;
  5786. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
  5787. gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
  5788. break;
  5789. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
  5790. gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
  5791. break;
  5792. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
  5793. gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
  5794. break;
  5795. default:
  5796. break;
  5797. }
  5798. return 0;
  5799. }
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

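/* IP-block level callbacks for the GFX v8 block. */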
const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.name = "gfx_v8_0",
	.early_init = gfx_v8_0_early_init,
	.late_init = gfx_v8_0_late_init,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.check_soft_reset = gfx_v8_0_check_soft_reset,
	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
	.soft_reset = gfx_v8_0_soft_reset,
	.post_soft_reset = gfx_v8_0_post_soft_reset,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
};

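/* Ring callbacks used by the gfx (graphics) ring. */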
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};

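/* Ring callbacks used by the compute (MEC) rings. */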
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};

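/*
 * Register the interrupt sources: the EOP source covers every CP ring type
 * (AMDGPU_CP_IRQ_LAST entries), plus single-type sources for privileged
 * register and privileged instruction faults.
 */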
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
}

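/*
 * Select the ASIC-specific RLC callbacks; parts without a dedicated
 * implementation fall back to the nop callbacks.
 */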
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.rlc.funcs = &iceland_rlc_funcs;
		break;
	case CHIP_STONEY:
	case CHIP_CARRIZO:
		adev->gfx.rlc.funcs = &cz_rlc_funcs;
		break;
	default:
		adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs;
		break;
	}
}

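/*
 * Initialize Global Data Share (GDS) partition sizes. The total GDS memory
 * size is read from GDS_VMID0_SIZE; the per-partition split depends on
 * whether the part exposes 64KB of GDS.
 */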
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

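/*
 * Apply a user-requested compute-unit disable bitmap by writing the
 * INACTIVE_CUS field of GC_USER_SHADER_ARRAY_CONFIG for the currently
 * selected shader array.
 */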
static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

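/*
 * Return the bitmap of active CUs for the currently selected SE/SH: the
 * hardware and user inactive-CU masks are combined and inverted, then
 * limited to max_cu_per_sh.
 */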
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}

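/*
 * Walk every shader engine / shader array, apply any user CU disable masks,
 * record the per-SH active CU bitmap, and build the total active CU count
 * and the always-on (AO) CU mask (up to two CUs per SH).
 */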
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];

	memset(cu_info, 0, sizeof(*cu_info));

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v8_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < 16; k++) {
				if (bitmap & mask) {
					if (counter < 2)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
}