gfx_v7_0.c 165 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047
214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765277527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334533553365337533853395340534153425343534453455346534753485349535053515352535353545355535653575358535953605361536253635364536553665367536853695370537153725373537453755376537753785379538053815382538353845385538653875388538953905391539253935394539553965397539853995400540154025403540454055406540754085409541054115412541354145415541654175418541954205421542254235424542554265427542854295430543154325433543454355436543754385439544054415442544354445445544654475448544954505451545254535454545554565457545854595460546154625463546454655466546754685469547054715472547354745475547654775478547954805481548254835484548554865487548854895490549154925493549454955496549754985499550055015502550355045505550655075508550955105511551255135514551555165517551855195520552155225523552455255526552755285529553055315532553355345535553655375538553955405541554255435544554555465547554855495550555155525553555455555556555755585559556055615562556355645565556655675568556955705571557255735574557555765577557855795580558155825583558455855586558755885589559055915592559355945595559655975598559956005601560256035604560556065607560856095
61056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678
  1. /*
  2. * Copyright 2014 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/firmware.h>
  24. #include "drmP.h"
  25. #include "amdgpu.h"
  26. #include "amdgpu_ih.h"
  27. #include "amdgpu_gfx.h"
  28. #include "cikd.h"
  29. #include "cik.h"
  30. #include "atom.h"
  31. #include "amdgpu_ucode.h"
  32. #include "clearstate_ci.h"
  33. #include "uvd/uvd_4_2_d.h"
  34. #include "dce/dce_8_0_d.h"
  35. #include "dce/dce_8_0_sh_mask.h"
  36. #include "bif/bif_4_1_d.h"
  37. #include "bif/bif_4_1_sh_mask.h"
  38. #include "gca/gfx_7_0_d.h"
  39. #include "gca/gfx_7_2_enum.h"
  40. #include "gca/gfx_7_2_sh_mask.h"
  41. #include "gmc/gmc_7_0_d.h"
  42. #include "gmc/gmc_7_0_sh_mask.h"
  43. #include "oss/oss_2_0_d.h"
  44. #include "oss/oss_2_0_sh_mask.h"
  45. #define GFX7_NUM_GFX_RINGS 1
  46. #define GFX7_NUM_COMPUTE_RINGS 8
  47. static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  48. static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  49. static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
  50. int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
  51. MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
  52. MODULE_FIRMWARE("radeon/bonaire_me.bin");
  53. MODULE_FIRMWARE("radeon/bonaire_ce.bin");
  54. MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
  55. MODULE_FIRMWARE("radeon/bonaire_mec.bin");
  56. MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
  57. MODULE_FIRMWARE("radeon/hawaii_me.bin");
  58. MODULE_FIRMWARE("radeon/hawaii_ce.bin");
  59. MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
  60. MODULE_FIRMWARE("radeon/hawaii_mec.bin");
  61. MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
  62. MODULE_FIRMWARE("radeon/kaveri_me.bin");
  63. MODULE_FIRMWARE("radeon/kaveri_ce.bin");
  64. MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
  65. MODULE_FIRMWARE("radeon/kaveri_mec.bin");
  66. MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
  67. MODULE_FIRMWARE("radeon/kabini_pfp.bin");
  68. MODULE_FIRMWARE("radeon/kabini_me.bin");
  69. MODULE_FIRMWARE("radeon/kabini_ce.bin");
  70. MODULE_FIRMWARE("radeon/kabini_rlc.bin");
  71. MODULE_FIRMWARE("radeon/kabini_mec.bin");
  72. MODULE_FIRMWARE("radeon/mullins_pfp.bin");
  73. MODULE_FIRMWARE("radeon/mullins_me.bin");
  74. MODULE_FIRMWARE("radeon/mullins_ce.bin");
  75. MODULE_FIRMWARE("radeon/mullins_rlc.bin");
  76. MODULE_FIRMWARE("radeon/mullins_mec.bin");
  77. static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
  78. {
  79. {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
  80. {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
  81. {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
  82. {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
  83. {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
  84. {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
  85. {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
  86. {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
  87. {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
  88. {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
  89. {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
  90. {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
  91. {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
  92. {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
  93. {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
  94. {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
  95. };
  96. static const u32 spectre_rlc_save_restore_register_list[] =
  97. {
  98. (0x0e00 << 16) | (0xc12c >> 2),
  99. 0x00000000,
  100. (0x0e00 << 16) | (0xc140 >> 2),
  101. 0x00000000,
  102. (0x0e00 << 16) | (0xc150 >> 2),
  103. 0x00000000,
  104. (0x0e00 << 16) | (0xc15c >> 2),
  105. 0x00000000,
  106. (0x0e00 << 16) | (0xc168 >> 2),
  107. 0x00000000,
  108. (0x0e00 << 16) | (0xc170 >> 2),
  109. 0x00000000,
  110. (0x0e00 << 16) | (0xc178 >> 2),
  111. 0x00000000,
  112. (0x0e00 << 16) | (0xc204 >> 2),
  113. 0x00000000,
  114. (0x0e00 << 16) | (0xc2b4 >> 2),
  115. 0x00000000,
  116. (0x0e00 << 16) | (0xc2b8 >> 2),
  117. 0x00000000,
  118. (0x0e00 << 16) | (0xc2bc >> 2),
  119. 0x00000000,
  120. (0x0e00 << 16) | (0xc2c0 >> 2),
  121. 0x00000000,
  122. (0x0e00 << 16) | (0x8228 >> 2),
  123. 0x00000000,
  124. (0x0e00 << 16) | (0x829c >> 2),
  125. 0x00000000,
  126. (0x0e00 << 16) | (0x869c >> 2),
  127. 0x00000000,
  128. (0x0600 << 16) | (0x98f4 >> 2),
  129. 0x00000000,
  130. (0x0e00 << 16) | (0x98f8 >> 2),
  131. 0x00000000,
  132. (0x0e00 << 16) | (0x9900 >> 2),
  133. 0x00000000,
  134. (0x0e00 << 16) | (0xc260 >> 2),
  135. 0x00000000,
  136. (0x0e00 << 16) | (0x90e8 >> 2),
  137. 0x00000000,
  138. (0x0e00 << 16) | (0x3c000 >> 2),
  139. 0x00000000,
  140. (0x0e00 << 16) | (0x3c00c >> 2),
  141. 0x00000000,
  142. (0x0e00 << 16) | (0x8c1c >> 2),
  143. 0x00000000,
  144. (0x0e00 << 16) | (0x9700 >> 2),
  145. 0x00000000,
  146. (0x0e00 << 16) | (0xcd20 >> 2),
  147. 0x00000000,
  148. (0x4e00 << 16) | (0xcd20 >> 2),
  149. 0x00000000,
  150. (0x5e00 << 16) | (0xcd20 >> 2),
  151. 0x00000000,
  152. (0x6e00 << 16) | (0xcd20 >> 2),
  153. 0x00000000,
  154. (0x7e00 << 16) | (0xcd20 >> 2),
  155. 0x00000000,
  156. (0x8e00 << 16) | (0xcd20 >> 2),
  157. 0x00000000,
  158. (0x9e00 << 16) | (0xcd20 >> 2),
  159. 0x00000000,
  160. (0xae00 << 16) | (0xcd20 >> 2),
  161. 0x00000000,
  162. (0xbe00 << 16) | (0xcd20 >> 2),
  163. 0x00000000,
  164. (0x0e00 << 16) | (0x89bc >> 2),
  165. 0x00000000,
  166. (0x0e00 << 16) | (0x8900 >> 2),
  167. 0x00000000,
  168. 0x3,
  169. (0x0e00 << 16) | (0xc130 >> 2),
  170. 0x00000000,
  171. (0x0e00 << 16) | (0xc134 >> 2),
  172. 0x00000000,
  173. (0x0e00 << 16) | (0xc1fc >> 2),
  174. 0x00000000,
  175. (0x0e00 << 16) | (0xc208 >> 2),
  176. 0x00000000,
  177. (0x0e00 << 16) | (0xc264 >> 2),
  178. 0x00000000,
  179. (0x0e00 << 16) | (0xc268 >> 2),
  180. 0x00000000,
  181. (0x0e00 << 16) | (0xc26c >> 2),
  182. 0x00000000,
  183. (0x0e00 << 16) | (0xc270 >> 2),
  184. 0x00000000,
  185. (0x0e00 << 16) | (0xc274 >> 2),
  186. 0x00000000,
  187. (0x0e00 << 16) | (0xc278 >> 2),
  188. 0x00000000,
  189. (0x0e00 << 16) | (0xc27c >> 2),
  190. 0x00000000,
  191. (0x0e00 << 16) | (0xc280 >> 2),
  192. 0x00000000,
  193. (0x0e00 << 16) | (0xc284 >> 2),
  194. 0x00000000,
  195. (0x0e00 << 16) | (0xc288 >> 2),
  196. 0x00000000,
  197. (0x0e00 << 16) | (0xc28c >> 2),
  198. 0x00000000,
  199. (0x0e00 << 16) | (0xc290 >> 2),
  200. 0x00000000,
  201. (0x0e00 << 16) | (0xc294 >> 2),
  202. 0x00000000,
  203. (0x0e00 << 16) | (0xc298 >> 2),
  204. 0x00000000,
  205. (0x0e00 << 16) | (0xc29c >> 2),
  206. 0x00000000,
  207. (0x0e00 << 16) | (0xc2a0 >> 2),
  208. 0x00000000,
  209. (0x0e00 << 16) | (0xc2a4 >> 2),
  210. 0x00000000,
  211. (0x0e00 << 16) | (0xc2a8 >> 2),
  212. 0x00000000,
  213. (0x0e00 << 16) | (0xc2ac >> 2),
  214. 0x00000000,
  215. (0x0e00 << 16) | (0xc2b0 >> 2),
  216. 0x00000000,
  217. (0x0e00 << 16) | (0x301d0 >> 2),
  218. 0x00000000,
  219. (0x0e00 << 16) | (0x30238 >> 2),
  220. 0x00000000,
  221. (0x0e00 << 16) | (0x30250 >> 2),
  222. 0x00000000,
  223. (0x0e00 << 16) | (0x30254 >> 2),
  224. 0x00000000,
  225. (0x0e00 << 16) | (0x30258 >> 2),
  226. 0x00000000,
  227. (0x0e00 << 16) | (0x3025c >> 2),
  228. 0x00000000,
  229. (0x4e00 << 16) | (0xc900 >> 2),
  230. 0x00000000,
  231. (0x5e00 << 16) | (0xc900 >> 2),
  232. 0x00000000,
  233. (0x6e00 << 16) | (0xc900 >> 2),
  234. 0x00000000,
  235. (0x7e00 << 16) | (0xc900 >> 2),
  236. 0x00000000,
  237. (0x8e00 << 16) | (0xc900 >> 2),
  238. 0x00000000,
  239. (0x9e00 << 16) | (0xc900 >> 2),
  240. 0x00000000,
  241. (0xae00 << 16) | (0xc900 >> 2),
  242. 0x00000000,
  243. (0xbe00 << 16) | (0xc900 >> 2),
  244. 0x00000000,
  245. (0x4e00 << 16) | (0xc904 >> 2),
  246. 0x00000000,
  247. (0x5e00 << 16) | (0xc904 >> 2),
  248. 0x00000000,
  249. (0x6e00 << 16) | (0xc904 >> 2),
  250. 0x00000000,
  251. (0x7e00 << 16) | (0xc904 >> 2),
  252. 0x00000000,
  253. (0x8e00 << 16) | (0xc904 >> 2),
  254. 0x00000000,
  255. (0x9e00 << 16) | (0xc904 >> 2),
  256. 0x00000000,
  257. (0xae00 << 16) | (0xc904 >> 2),
  258. 0x00000000,
  259. (0xbe00 << 16) | (0xc904 >> 2),
  260. 0x00000000,
  261. (0x4e00 << 16) | (0xc908 >> 2),
  262. 0x00000000,
  263. (0x5e00 << 16) | (0xc908 >> 2),
  264. 0x00000000,
  265. (0x6e00 << 16) | (0xc908 >> 2),
  266. 0x00000000,
  267. (0x7e00 << 16) | (0xc908 >> 2),
  268. 0x00000000,
  269. (0x8e00 << 16) | (0xc908 >> 2),
  270. 0x00000000,
  271. (0x9e00 << 16) | (0xc908 >> 2),
  272. 0x00000000,
  273. (0xae00 << 16) | (0xc908 >> 2),
  274. 0x00000000,
  275. (0xbe00 << 16) | (0xc908 >> 2),
  276. 0x00000000,
  277. (0x4e00 << 16) | (0xc90c >> 2),
  278. 0x00000000,
  279. (0x5e00 << 16) | (0xc90c >> 2),
  280. 0x00000000,
  281. (0x6e00 << 16) | (0xc90c >> 2),
  282. 0x00000000,
  283. (0x7e00 << 16) | (0xc90c >> 2),
  284. 0x00000000,
  285. (0x8e00 << 16) | (0xc90c >> 2),
  286. 0x00000000,
  287. (0x9e00 << 16) | (0xc90c >> 2),
  288. 0x00000000,
  289. (0xae00 << 16) | (0xc90c >> 2),
  290. 0x00000000,
  291. (0xbe00 << 16) | (0xc90c >> 2),
  292. 0x00000000,
  293. (0x4e00 << 16) | (0xc910 >> 2),
  294. 0x00000000,
  295. (0x5e00 << 16) | (0xc910 >> 2),
  296. 0x00000000,
  297. (0x6e00 << 16) | (0xc910 >> 2),
  298. 0x00000000,
  299. (0x7e00 << 16) | (0xc910 >> 2),
  300. 0x00000000,
  301. (0x8e00 << 16) | (0xc910 >> 2),
  302. 0x00000000,
  303. (0x9e00 << 16) | (0xc910 >> 2),
  304. 0x00000000,
  305. (0xae00 << 16) | (0xc910 >> 2),
  306. 0x00000000,
  307. (0xbe00 << 16) | (0xc910 >> 2),
  308. 0x00000000,
  309. (0x0e00 << 16) | (0xc99c >> 2),
  310. 0x00000000,
  311. (0x0e00 << 16) | (0x9834 >> 2),
  312. 0x00000000,
  313. (0x0000 << 16) | (0x30f00 >> 2),
  314. 0x00000000,
  315. (0x0001 << 16) | (0x30f00 >> 2),
  316. 0x00000000,
  317. (0x0000 << 16) | (0x30f04 >> 2),
  318. 0x00000000,
  319. (0x0001 << 16) | (0x30f04 >> 2),
  320. 0x00000000,
  321. (0x0000 << 16) | (0x30f08 >> 2),
  322. 0x00000000,
  323. (0x0001 << 16) | (0x30f08 >> 2),
  324. 0x00000000,
  325. (0x0000 << 16) | (0x30f0c >> 2),
  326. 0x00000000,
  327. (0x0001 << 16) | (0x30f0c >> 2),
  328. 0x00000000,
  329. (0x0600 << 16) | (0x9b7c >> 2),
  330. 0x00000000,
  331. (0x0e00 << 16) | (0x8a14 >> 2),
  332. 0x00000000,
  333. (0x0e00 << 16) | (0x8a18 >> 2),
  334. 0x00000000,
  335. (0x0600 << 16) | (0x30a00 >> 2),
  336. 0x00000000,
  337. (0x0e00 << 16) | (0x8bf0 >> 2),
  338. 0x00000000,
  339. (0x0e00 << 16) | (0x8bcc >> 2),
  340. 0x00000000,
  341. (0x0e00 << 16) | (0x8b24 >> 2),
  342. 0x00000000,
  343. (0x0e00 << 16) | (0x30a04 >> 2),
  344. 0x00000000,
  345. (0x0600 << 16) | (0x30a10 >> 2),
  346. 0x00000000,
  347. (0x0600 << 16) | (0x30a14 >> 2),
  348. 0x00000000,
  349. (0x0600 << 16) | (0x30a18 >> 2),
  350. 0x00000000,
  351. (0x0600 << 16) | (0x30a2c >> 2),
  352. 0x00000000,
  353. (0x0e00 << 16) | (0xc700 >> 2),
  354. 0x00000000,
  355. (0x0e00 << 16) | (0xc704 >> 2),
  356. 0x00000000,
  357. (0x0e00 << 16) | (0xc708 >> 2),
  358. 0x00000000,
  359. (0x0e00 << 16) | (0xc768 >> 2),
  360. 0x00000000,
  361. (0x0400 << 16) | (0xc770 >> 2),
  362. 0x00000000,
  363. (0x0400 << 16) | (0xc774 >> 2),
  364. 0x00000000,
  365. (0x0400 << 16) | (0xc778 >> 2),
  366. 0x00000000,
  367. (0x0400 << 16) | (0xc77c >> 2),
  368. 0x00000000,
  369. (0x0400 << 16) | (0xc780 >> 2),
  370. 0x00000000,
  371. (0x0400 << 16) | (0xc784 >> 2),
  372. 0x00000000,
  373. (0x0400 << 16) | (0xc788 >> 2),
  374. 0x00000000,
  375. (0x0400 << 16) | (0xc78c >> 2),
  376. 0x00000000,
  377. (0x0400 << 16) | (0xc798 >> 2),
  378. 0x00000000,
  379. (0x0400 << 16) | (0xc79c >> 2),
  380. 0x00000000,
  381. (0x0400 << 16) | (0xc7a0 >> 2),
  382. 0x00000000,
  383. (0x0400 << 16) | (0xc7a4 >> 2),
  384. 0x00000000,
  385. (0x0400 << 16) | (0xc7a8 >> 2),
  386. 0x00000000,
  387. (0x0400 << 16) | (0xc7ac >> 2),
  388. 0x00000000,
  389. (0x0400 << 16) | (0xc7b0 >> 2),
  390. 0x00000000,
  391. (0x0400 << 16) | (0xc7b4 >> 2),
  392. 0x00000000,
  393. (0x0e00 << 16) | (0x9100 >> 2),
  394. 0x00000000,
  395. (0x0e00 << 16) | (0x3c010 >> 2),
  396. 0x00000000,
  397. (0x0e00 << 16) | (0x92a8 >> 2),
  398. 0x00000000,
  399. (0x0e00 << 16) | (0x92ac >> 2),
  400. 0x00000000,
  401. (0x0e00 << 16) | (0x92b4 >> 2),
  402. 0x00000000,
  403. (0x0e00 << 16) | (0x92b8 >> 2),
  404. 0x00000000,
  405. (0x0e00 << 16) | (0x92bc >> 2),
  406. 0x00000000,
  407. (0x0e00 << 16) | (0x92c0 >> 2),
  408. 0x00000000,
  409. (0x0e00 << 16) | (0x92c4 >> 2),
  410. 0x00000000,
  411. (0x0e00 << 16) | (0x92c8 >> 2),
  412. 0x00000000,
  413. (0x0e00 << 16) | (0x92cc >> 2),
  414. 0x00000000,
  415. (0x0e00 << 16) | (0x92d0 >> 2),
  416. 0x00000000,
  417. (0x0e00 << 16) | (0x8c00 >> 2),
  418. 0x00000000,
  419. (0x0e00 << 16) | (0x8c04 >> 2),
  420. 0x00000000,
  421. (0x0e00 << 16) | (0x8c20 >> 2),
  422. 0x00000000,
  423. (0x0e00 << 16) | (0x8c38 >> 2),
  424. 0x00000000,
  425. (0x0e00 << 16) | (0x8c3c >> 2),
  426. 0x00000000,
  427. (0x0e00 << 16) | (0xae00 >> 2),
  428. 0x00000000,
  429. (0x0e00 << 16) | (0x9604 >> 2),
  430. 0x00000000,
  431. (0x0e00 << 16) | (0xac08 >> 2),
  432. 0x00000000,
  433. (0x0e00 << 16) | (0xac0c >> 2),
  434. 0x00000000,
  435. (0x0e00 << 16) | (0xac10 >> 2),
  436. 0x00000000,
  437. (0x0e00 << 16) | (0xac14 >> 2),
  438. 0x00000000,
  439. (0x0e00 << 16) | (0xac58 >> 2),
  440. 0x00000000,
  441. (0x0e00 << 16) | (0xac68 >> 2),
  442. 0x00000000,
  443. (0x0e00 << 16) | (0xac6c >> 2),
  444. 0x00000000,
  445. (0x0e00 << 16) | (0xac70 >> 2),
  446. 0x00000000,
  447. (0x0e00 << 16) | (0xac74 >> 2),
  448. 0x00000000,
  449. (0x0e00 << 16) | (0xac78 >> 2),
  450. 0x00000000,
  451. (0x0e00 << 16) | (0xac7c >> 2),
  452. 0x00000000,
  453. (0x0e00 << 16) | (0xac80 >> 2),
  454. 0x00000000,
  455. (0x0e00 << 16) | (0xac84 >> 2),
  456. 0x00000000,
  457. (0x0e00 << 16) | (0xac88 >> 2),
  458. 0x00000000,
  459. (0x0e00 << 16) | (0xac8c >> 2),
  460. 0x00000000,
  461. (0x0e00 << 16) | (0x970c >> 2),
  462. 0x00000000,
  463. (0x0e00 << 16) | (0x9714 >> 2),
  464. 0x00000000,
  465. (0x0e00 << 16) | (0x9718 >> 2),
  466. 0x00000000,
  467. (0x0e00 << 16) | (0x971c >> 2),
  468. 0x00000000,
  469. (0x0e00 << 16) | (0x31068 >> 2),
  470. 0x00000000,
  471. (0x4e00 << 16) | (0x31068 >> 2),
  472. 0x00000000,
  473. (0x5e00 << 16) | (0x31068 >> 2),
  474. 0x00000000,
  475. (0x6e00 << 16) | (0x31068 >> 2),
  476. 0x00000000,
  477. (0x7e00 << 16) | (0x31068 >> 2),
  478. 0x00000000,
  479. (0x8e00 << 16) | (0x31068 >> 2),
  480. 0x00000000,
  481. (0x9e00 << 16) | (0x31068 >> 2),
  482. 0x00000000,
  483. (0xae00 << 16) | (0x31068 >> 2),
  484. 0x00000000,
  485. (0xbe00 << 16) | (0x31068 >> 2),
  486. 0x00000000,
  487. (0x0e00 << 16) | (0xcd10 >> 2),
  488. 0x00000000,
  489. (0x0e00 << 16) | (0xcd14 >> 2),
  490. 0x00000000,
  491. (0x0e00 << 16) | (0x88b0 >> 2),
  492. 0x00000000,
  493. (0x0e00 << 16) | (0x88b4 >> 2),
  494. 0x00000000,
  495. (0x0e00 << 16) | (0x88b8 >> 2),
  496. 0x00000000,
  497. (0x0e00 << 16) | (0x88bc >> 2),
  498. 0x00000000,
  499. (0x0400 << 16) | (0x89c0 >> 2),
  500. 0x00000000,
  501. (0x0e00 << 16) | (0x88c4 >> 2),
  502. 0x00000000,
  503. (0x0e00 << 16) | (0x88c8 >> 2),
  504. 0x00000000,
  505. (0x0e00 << 16) | (0x88d0 >> 2),
  506. 0x00000000,
  507. (0x0e00 << 16) | (0x88d4 >> 2),
  508. 0x00000000,
  509. (0x0e00 << 16) | (0x88d8 >> 2),
  510. 0x00000000,
  511. (0x0e00 << 16) | (0x8980 >> 2),
  512. 0x00000000,
  513. (0x0e00 << 16) | (0x30938 >> 2),
  514. 0x00000000,
  515. (0x0e00 << 16) | (0x3093c >> 2),
  516. 0x00000000,
  517. (0x0e00 << 16) | (0x30940 >> 2),
  518. 0x00000000,
  519. (0x0e00 << 16) | (0x89a0 >> 2),
  520. 0x00000000,
  521. (0x0e00 << 16) | (0x30900 >> 2),
  522. 0x00000000,
  523. (0x0e00 << 16) | (0x30904 >> 2),
  524. 0x00000000,
  525. (0x0e00 << 16) | (0x89b4 >> 2),
  526. 0x00000000,
  527. (0x0e00 << 16) | (0x3c210 >> 2),
  528. 0x00000000,
  529. (0x0e00 << 16) | (0x3c214 >> 2),
  530. 0x00000000,
  531. (0x0e00 << 16) | (0x3c218 >> 2),
  532. 0x00000000,
  533. (0x0e00 << 16) | (0x8904 >> 2),
  534. 0x00000000,
  535. 0x5,
  536. (0x0e00 << 16) | (0x8c28 >> 2),
  537. (0x0e00 << 16) | (0x8c2c >> 2),
  538. (0x0e00 << 16) | (0x8c30 >> 2),
  539. (0x0e00 << 16) | (0x8c34 >> 2),
  540. (0x0e00 << 16) | (0x9600 >> 2),
  541. };
  542. static const u32 kalindi_rlc_save_restore_register_list[] =
  543. {
  544. (0x0e00 << 16) | (0xc12c >> 2),
  545. 0x00000000,
  546. (0x0e00 << 16) | (0xc140 >> 2),
  547. 0x00000000,
  548. (0x0e00 << 16) | (0xc150 >> 2),
  549. 0x00000000,
  550. (0x0e00 << 16) | (0xc15c >> 2),
  551. 0x00000000,
  552. (0x0e00 << 16) | (0xc168 >> 2),
  553. 0x00000000,
  554. (0x0e00 << 16) | (0xc170 >> 2),
  555. 0x00000000,
  556. (0x0e00 << 16) | (0xc204 >> 2),
  557. 0x00000000,
  558. (0x0e00 << 16) | (0xc2b4 >> 2),
  559. 0x00000000,
  560. (0x0e00 << 16) | (0xc2b8 >> 2),
  561. 0x00000000,
  562. (0x0e00 << 16) | (0xc2bc >> 2),
  563. 0x00000000,
  564. (0x0e00 << 16) | (0xc2c0 >> 2),
  565. 0x00000000,
  566. (0x0e00 << 16) | (0x8228 >> 2),
  567. 0x00000000,
  568. (0x0e00 << 16) | (0x829c >> 2),
  569. 0x00000000,
  570. (0x0e00 << 16) | (0x869c >> 2),
  571. 0x00000000,
  572. (0x0600 << 16) | (0x98f4 >> 2),
  573. 0x00000000,
  574. (0x0e00 << 16) | (0x98f8 >> 2),
  575. 0x00000000,
  576. (0x0e00 << 16) | (0x9900 >> 2),
  577. 0x00000000,
  578. (0x0e00 << 16) | (0xc260 >> 2),
  579. 0x00000000,
  580. (0x0e00 << 16) | (0x90e8 >> 2),
  581. 0x00000000,
  582. (0x0e00 << 16) | (0x3c000 >> 2),
  583. 0x00000000,
  584. (0x0e00 << 16) | (0x3c00c >> 2),
  585. 0x00000000,
  586. (0x0e00 << 16) | (0x8c1c >> 2),
  587. 0x00000000,
  588. (0x0e00 << 16) | (0x9700 >> 2),
  589. 0x00000000,
  590. (0x0e00 << 16) | (0xcd20 >> 2),
  591. 0x00000000,
  592. (0x4e00 << 16) | (0xcd20 >> 2),
  593. 0x00000000,
  594. (0x5e00 << 16) | (0xcd20 >> 2),
  595. 0x00000000,
  596. (0x6e00 << 16) | (0xcd20 >> 2),
  597. 0x00000000,
  598. (0x7e00 << 16) | (0xcd20 >> 2),
  599. 0x00000000,
  600. (0x0e00 << 16) | (0x89bc >> 2),
  601. 0x00000000,
  602. (0x0e00 << 16) | (0x8900 >> 2),
  603. 0x00000000,
  604. 0x3,
  605. (0x0e00 << 16) | (0xc130 >> 2),
  606. 0x00000000,
  607. (0x0e00 << 16) | (0xc134 >> 2),
  608. 0x00000000,
  609. (0x0e00 << 16) | (0xc1fc >> 2),
  610. 0x00000000,
  611. (0x0e00 << 16) | (0xc208 >> 2),
  612. 0x00000000,
  613. (0x0e00 << 16) | (0xc264 >> 2),
  614. 0x00000000,
  615. (0x0e00 << 16) | (0xc268 >> 2),
  616. 0x00000000,
  617. (0x0e00 << 16) | (0xc26c >> 2),
  618. 0x00000000,
  619. (0x0e00 << 16) | (0xc270 >> 2),
  620. 0x00000000,
  621. (0x0e00 << 16) | (0xc274 >> 2),
  622. 0x00000000,
  623. (0x0e00 << 16) | (0xc28c >> 2),
  624. 0x00000000,
  625. (0x0e00 << 16) | (0xc290 >> 2),
  626. 0x00000000,
  627. (0x0e00 << 16) | (0xc294 >> 2),
  628. 0x00000000,
  629. (0x0e00 << 16) | (0xc298 >> 2),
  630. 0x00000000,
  631. (0x0e00 << 16) | (0xc2a0 >> 2),
  632. 0x00000000,
  633. (0x0e00 << 16) | (0xc2a4 >> 2),
  634. 0x00000000,
  635. (0x0e00 << 16) | (0xc2a8 >> 2),
  636. 0x00000000,
  637. (0x0e00 << 16) | (0xc2ac >> 2),
  638. 0x00000000,
  639. (0x0e00 << 16) | (0x301d0 >> 2),
  640. 0x00000000,
  641. (0x0e00 << 16) | (0x30238 >> 2),
  642. 0x00000000,
  643. (0x0e00 << 16) | (0x30250 >> 2),
  644. 0x00000000,
  645. (0x0e00 << 16) | (0x30254 >> 2),
  646. 0x00000000,
  647. (0x0e00 << 16) | (0x30258 >> 2),
  648. 0x00000000,
  649. (0x0e00 << 16) | (0x3025c >> 2),
  650. 0x00000000,
  651. (0x4e00 << 16) | (0xc900 >> 2),
  652. 0x00000000,
  653. (0x5e00 << 16) | (0xc900 >> 2),
  654. 0x00000000,
  655. (0x6e00 << 16) | (0xc900 >> 2),
  656. 0x00000000,
  657. (0x7e00 << 16) | (0xc900 >> 2),
  658. 0x00000000,
  659. (0x4e00 << 16) | (0xc904 >> 2),
  660. 0x00000000,
  661. (0x5e00 << 16) | (0xc904 >> 2),
  662. 0x00000000,
  663. (0x6e00 << 16) | (0xc904 >> 2),
  664. 0x00000000,
  665. (0x7e00 << 16) | (0xc904 >> 2),
  666. 0x00000000,
  667. (0x4e00 << 16) | (0xc908 >> 2),
  668. 0x00000000,
  669. (0x5e00 << 16) | (0xc908 >> 2),
  670. 0x00000000,
  671. (0x6e00 << 16) | (0xc908 >> 2),
  672. 0x00000000,
  673. (0x7e00 << 16) | (0xc908 >> 2),
  674. 0x00000000,
  675. (0x4e00 << 16) | (0xc90c >> 2),
  676. 0x00000000,
  677. (0x5e00 << 16) | (0xc90c >> 2),
  678. 0x00000000,
  679. (0x6e00 << 16) | (0xc90c >> 2),
  680. 0x00000000,
  681. (0x7e00 << 16) | (0xc90c >> 2),
  682. 0x00000000,
  683. (0x4e00 << 16) | (0xc910 >> 2),
  684. 0x00000000,
  685. (0x5e00 << 16) | (0xc910 >> 2),
  686. 0x00000000,
  687. (0x6e00 << 16) | (0xc910 >> 2),
  688. 0x00000000,
  689. (0x7e00 << 16) | (0xc910 >> 2),
  690. 0x00000000,
  691. (0x0e00 << 16) | (0xc99c >> 2),
  692. 0x00000000,
  693. (0x0e00 << 16) | (0x9834 >> 2),
  694. 0x00000000,
  695. (0x0000 << 16) | (0x30f00 >> 2),
  696. 0x00000000,
  697. (0x0000 << 16) | (0x30f04 >> 2),
  698. 0x00000000,
  699. (0x0000 << 16) | (0x30f08 >> 2),
  700. 0x00000000,
  701. (0x0000 << 16) | (0x30f0c >> 2),
  702. 0x00000000,
  703. (0x0600 << 16) | (0x9b7c >> 2),
  704. 0x00000000,
  705. (0x0e00 << 16) | (0x8a14 >> 2),
  706. 0x00000000,
  707. (0x0e00 << 16) | (0x8a18 >> 2),
  708. 0x00000000,
  709. (0x0600 << 16) | (0x30a00 >> 2),
  710. 0x00000000,
  711. (0x0e00 << 16) | (0x8bf0 >> 2),
  712. 0x00000000,
  713. (0x0e00 << 16) | (0x8bcc >> 2),
  714. 0x00000000,
  715. (0x0e00 << 16) | (0x8b24 >> 2),
  716. 0x00000000,
  717. (0x0e00 << 16) | (0x30a04 >> 2),
  718. 0x00000000,
  719. (0x0600 << 16) | (0x30a10 >> 2),
  720. 0x00000000,
  721. (0x0600 << 16) | (0x30a14 >> 2),
  722. 0x00000000,
  723. (0x0600 << 16) | (0x30a18 >> 2),
  724. 0x00000000,
  725. (0x0600 << 16) | (0x30a2c >> 2),
  726. 0x00000000,
  727. (0x0e00 << 16) | (0xc700 >> 2),
  728. 0x00000000,
  729. (0x0e00 << 16) | (0xc704 >> 2),
  730. 0x00000000,
  731. (0x0e00 << 16) | (0xc708 >> 2),
  732. 0x00000000,
  733. (0x0e00 << 16) | (0xc768 >> 2),
  734. 0x00000000,
  735. (0x0400 << 16) | (0xc770 >> 2),
  736. 0x00000000,
  737. (0x0400 << 16) | (0xc774 >> 2),
  738. 0x00000000,
  739. (0x0400 << 16) | (0xc798 >> 2),
  740. 0x00000000,
  741. (0x0400 << 16) | (0xc79c >> 2),
  742. 0x00000000,
  743. (0x0e00 << 16) | (0x9100 >> 2),
  744. 0x00000000,
  745. (0x0e00 << 16) | (0x3c010 >> 2),
  746. 0x00000000,
  747. (0x0e00 << 16) | (0x8c00 >> 2),
  748. 0x00000000,
  749. (0x0e00 << 16) | (0x8c04 >> 2),
  750. 0x00000000,
  751. (0x0e00 << 16) | (0x8c20 >> 2),
  752. 0x00000000,
  753. (0x0e00 << 16) | (0x8c38 >> 2),
  754. 0x00000000,
  755. (0x0e00 << 16) | (0x8c3c >> 2),
  756. 0x00000000,
  757. (0x0e00 << 16) | (0xae00 >> 2),
  758. 0x00000000,
  759. (0x0e00 << 16) | (0x9604 >> 2),
  760. 0x00000000,
  761. (0x0e00 << 16) | (0xac08 >> 2),
  762. 0x00000000,
  763. (0x0e00 << 16) | (0xac0c >> 2),
  764. 0x00000000,
  765. (0x0e00 << 16) | (0xac10 >> 2),
  766. 0x00000000,
  767. (0x0e00 << 16) | (0xac14 >> 2),
  768. 0x00000000,
  769. (0x0e00 << 16) | (0xac58 >> 2),
  770. 0x00000000,
  771. (0x0e00 << 16) | (0xac68 >> 2),
  772. 0x00000000,
  773. (0x0e00 << 16) | (0xac6c >> 2),
  774. 0x00000000,
  775. (0x0e00 << 16) | (0xac70 >> 2),
  776. 0x00000000,
  777. (0x0e00 << 16) | (0xac74 >> 2),
  778. 0x00000000,
  779. (0x0e00 << 16) | (0xac78 >> 2),
  780. 0x00000000,
  781. (0x0e00 << 16) | (0xac7c >> 2),
  782. 0x00000000,
  783. (0x0e00 << 16) | (0xac80 >> 2),
  784. 0x00000000,
  785. (0x0e00 << 16) | (0xac84 >> 2),
  786. 0x00000000,
  787. (0x0e00 << 16) | (0xac88 >> 2),
  788. 0x00000000,
  789. (0x0e00 << 16) | (0xac8c >> 2),
  790. 0x00000000,
  791. (0x0e00 << 16) | (0x970c >> 2),
  792. 0x00000000,
  793. (0x0e00 << 16) | (0x9714 >> 2),
  794. 0x00000000,
  795. (0x0e00 << 16) | (0x9718 >> 2),
  796. 0x00000000,
  797. (0x0e00 << 16) | (0x971c >> 2),
  798. 0x00000000,
  799. (0x0e00 << 16) | (0x31068 >> 2),
  800. 0x00000000,
  801. (0x4e00 << 16) | (0x31068 >> 2),
  802. 0x00000000,
  803. (0x5e00 << 16) | (0x31068 >> 2),
  804. 0x00000000,
  805. (0x6e00 << 16) | (0x31068 >> 2),
  806. 0x00000000,
  807. (0x7e00 << 16) | (0x31068 >> 2),
  808. 0x00000000,
  809. (0x0e00 << 16) | (0xcd10 >> 2),
  810. 0x00000000,
  811. (0x0e00 << 16) | (0xcd14 >> 2),
  812. 0x00000000,
  813. (0x0e00 << 16) | (0x88b0 >> 2),
  814. 0x00000000,
  815. (0x0e00 << 16) | (0x88b4 >> 2),
  816. 0x00000000,
  817. (0x0e00 << 16) | (0x88b8 >> 2),
  818. 0x00000000,
  819. (0x0e00 << 16) | (0x88bc >> 2),
  820. 0x00000000,
  821. (0x0400 << 16) | (0x89c0 >> 2),
  822. 0x00000000,
  823. (0x0e00 << 16) | (0x88c4 >> 2),
  824. 0x00000000,
  825. (0x0e00 << 16) | (0x88c8 >> 2),
  826. 0x00000000,
  827. (0x0e00 << 16) | (0x88d0 >> 2),
  828. 0x00000000,
  829. (0x0e00 << 16) | (0x88d4 >> 2),
  830. 0x00000000,
  831. (0x0e00 << 16) | (0x88d8 >> 2),
  832. 0x00000000,
  833. (0x0e00 << 16) | (0x8980 >> 2),
  834. 0x00000000,
  835. (0x0e00 << 16) | (0x30938 >> 2),
  836. 0x00000000,
  837. (0x0e00 << 16) | (0x3093c >> 2),
  838. 0x00000000,
  839. (0x0e00 << 16) | (0x30940 >> 2),
  840. 0x00000000,
  841. (0x0e00 << 16) | (0x89a0 >> 2),
  842. 0x00000000,
  843. (0x0e00 << 16) | (0x30900 >> 2),
  844. 0x00000000,
  845. (0x0e00 << 16) | (0x30904 >> 2),
  846. 0x00000000,
  847. (0x0e00 << 16) | (0x89b4 >> 2),
  848. 0x00000000,
  849. (0x0e00 << 16) | (0x3e1fc >> 2),
  850. 0x00000000,
  851. (0x0e00 << 16) | (0x3c210 >> 2),
  852. 0x00000000,
  853. (0x0e00 << 16) | (0x3c214 >> 2),
  854. 0x00000000,
  855. (0x0e00 << 16) | (0x3c218 >> 2),
  856. 0x00000000,
  857. (0x0e00 << 16) | (0x8904 >> 2),
  858. 0x00000000,
  859. 0x5,
  860. (0x0e00 << 16) | (0x8c28 >> 2),
  861. (0x0e00 << 16) | (0x8c2c >> 2),
  862. (0x0e00 << 16) | (0x8c30 >> 2),
  863. (0x0e00 << 16) | (0x8c34 >> 2),
  864. (0x0e00 << 16) | (0x9600 >> 2),
  865. };
  866. static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
  867. static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
  868. static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
  869. static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
  870. /*
  871. * Core functions
  872. */
  873. /**
  874. * gfx_v7_0_init_microcode - load ucode images from disk
  875. *
  876. * @adev: amdgpu_device pointer
  877. *
  878. * Use the firmware interface to load the ucode images into
  879. * the driver (not loaded into hw).
  880. * Returns 0 on success, error on failure.
  881. */
  882. static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
  883. {
  884. const char *chip_name;
  885. char fw_name[30];
  886. int err;
  887. DRM_DEBUG("\n");
  888. switch (adev->asic_type) {
  889. case CHIP_BONAIRE:
  890. chip_name = "bonaire";
  891. break;
  892. case CHIP_HAWAII:
  893. chip_name = "hawaii";
  894. break;
  895. case CHIP_KAVERI:
  896. chip_name = "kaveri";
  897. break;
  898. case CHIP_KABINI:
  899. chip_name = "kabini";
  900. break;
  901. case CHIP_MULLINS:
  902. chip_name = "mullins";
  903. break;
  904. default: BUG();
  905. }
  906. snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
  907. err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
  908. if (err)
  909. goto out;
  910. err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
  911. if (err)
  912. goto out;
  913. snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
  914. err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
  915. if (err)
  916. goto out;
  917. err = amdgpu_ucode_validate(adev->gfx.me_fw);
  918. if (err)
  919. goto out;
  920. snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
  921. err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
  922. if (err)
  923. goto out;
  924. err = amdgpu_ucode_validate(adev->gfx.ce_fw);
  925. if (err)
  926. goto out;
  927. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
  928. err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
  929. if (err)
  930. goto out;
  931. err = amdgpu_ucode_validate(adev->gfx.mec_fw);
  932. if (err)
  933. goto out;
  934. if (adev->asic_type == CHIP_KAVERI) {
  935. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
  936. err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
  937. if (err)
  938. goto out;
  939. err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
  940. if (err)
  941. goto out;
  942. }
  943. snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
  944. err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
  945. if (err)
  946. goto out;
  947. err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
  948. out:
  949. if (err) {
  950. printk(KERN_ERR
  951. "gfx7: Failed to load firmware \"%s\"\n",
  952. fw_name);
  953. release_firmware(adev->gfx.pfp_fw);
  954. adev->gfx.pfp_fw = NULL;
  955. release_firmware(adev->gfx.me_fw);
  956. adev->gfx.me_fw = NULL;
  957. release_firmware(adev->gfx.ce_fw);
  958. adev->gfx.ce_fw = NULL;
  959. release_firmware(adev->gfx.mec_fw);
  960. adev->gfx.mec_fw = NULL;
  961. release_firmware(adev->gfx.mec2_fw);
  962. adev->gfx.mec2_fw = NULL;
  963. release_firmware(adev->gfx.rlc_fw);
  964. adev->gfx.rlc_fw = NULL;
  965. }
  966. return err;
  967. }
  968. /**
  969. * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  970. *
  971. * @adev: amdgpu_device pointer
  972. *
  973. * Starting with SI, the tiling setup is done globally in a
  974. * set of 32 tiling modes. Rather than selecting each set of
  975. * parameters per surface as on older asics, we just select
  976. * which index in the tiling table we want to use, and the
  977. * surface uses those parameters (CIK).
  978. */
  979. static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
  980. {
  981. const u32 num_tile_mode_states = 32;
  982. const u32 num_secondary_tile_mode_states = 16;
  983. u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
  984. switch (adev->gfx.config.mem_row_size_in_kb) {
  985. case 1:
  986. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
  987. break;
  988. case 2:
  989. default:
  990. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
  991. break;
  992. case 4:
  993. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
  994. break;
  995. }
  996. switch (adev->asic_type) {
  997. case CHIP_BONAIRE:
  998. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  999. switch (reg_offset) {
  1000. case 0:
  1001. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1002. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1003. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
  1004. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1005. break;
  1006. case 1:
  1007. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1008. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1009. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
  1010. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1011. break;
  1012. case 2:
  1013. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1014. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1015. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
  1016. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1017. break;
  1018. case 3:
  1019. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1020. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1021. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
  1022. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1023. break;
  1024. case 4:
  1025. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1026. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1027. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1028. TILE_SPLIT(split_equal_to_row_size));
  1029. break;
  1030. case 5:
  1031. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1032. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1033. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1034. break;
  1035. case 6:
  1036. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1037. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1038. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1039. TILE_SPLIT(split_equal_to_row_size));
  1040. break;
  1041. case 7:
  1042. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1043. break;
  1044. case 8:
  1045. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1046. PIPE_CONFIG(ADDR_SURF_P4_16x16));
  1047. break;
  1048. case 9:
  1049. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1050. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1051. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1052. break;
  1053. case 10:
  1054. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1055. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1056. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1057. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1058. break;
  1059. case 11:
  1060. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1061. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1062. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1063. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1064. break;
  1065. case 12:
  1066. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1067. break;
  1068. case 13:
  1069. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1070. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1071. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1072. break;
  1073. case 14:
  1074. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1075. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1076. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1077. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1078. break;
  1079. case 15:
  1080. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
  1081. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1082. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1083. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1084. break;
  1085. case 16:
  1086. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1087. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1088. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1089. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1090. break;
  1091. case 17:
  1092. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1093. break;
  1094. case 18:
  1095. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1096. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1097. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1098. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1099. break;
  1100. case 19:
  1101. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1102. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1103. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1104. break;
  1105. case 20:
  1106. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1107. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1108. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1109. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1110. break;
  1111. case 21:
  1112. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
  1113. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1114. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1115. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1116. break;
  1117. case 22:
  1118. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
  1119. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1120. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1121. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1122. break;
  1123. case 23:
  1124. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1125. break;
  1126. case 24:
  1127. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1128. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1129. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1130. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1131. break;
  1132. case 25:
  1133. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
  1134. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1135. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1136. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1137. break;
  1138. case 26:
  1139. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
  1140. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1141. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1142. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1143. break;
  1144. case 27:
  1145. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1146. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1147. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  1148. break;
  1149. case 28:
  1150. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1151. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1152. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1153. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1154. break;
  1155. case 29:
  1156. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1157. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1158. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1159. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1160. break;
  1161. case 30:
  1162. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1163. break;
  1164. default:
  1165. gb_tile_moden = 0;
  1166. break;
  1167. }
  1168. adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
  1169. WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
  1170. }
  1171. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  1172. switch (reg_offset) {
  1173. case 0:
  1174. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1175. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1176. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1177. NUM_BANKS(ADDR_SURF_16_BANK));
  1178. break;
  1179. case 1:
  1180. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1181. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1182. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1183. NUM_BANKS(ADDR_SURF_16_BANK));
  1184. break;
  1185. case 2:
  1186. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1187. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1188. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1189. NUM_BANKS(ADDR_SURF_16_BANK));
  1190. break;
  1191. case 3:
  1192. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1193. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1194. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1195. NUM_BANKS(ADDR_SURF_16_BANK));
  1196. break;
  1197. case 4:
  1198. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1199. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1200. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1201. NUM_BANKS(ADDR_SURF_16_BANK));
  1202. break;
  1203. case 5:
  1204. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1205. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1206. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1207. NUM_BANKS(ADDR_SURF_8_BANK));
  1208. break;
  1209. case 6:
  1210. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1211. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1212. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1213. NUM_BANKS(ADDR_SURF_4_BANK));
  1214. break;
  1215. case 8:
  1216. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  1217. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  1218. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1219. NUM_BANKS(ADDR_SURF_16_BANK));
  1220. break;
  1221. case 9:
  1222. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  1223. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1224. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1225. NUM_BANKS(ADDR_SURF_16_BANK));
  1226. break;
  1227. case 10:
  1228. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1229. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1230. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1231. NUM_BANKS(ADDR_SURF_16_BANK));
  1232. break;
  1233. case 11:
  1234. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1235. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1236. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1237. NUM_BANKS(ADDR_SURF_16_BANK));
  1238. break;
  1239. case 12:
  1240. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1241. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1242. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1243. NUM_BANKS(ADDR_SURF_16_BANK));
  1244. break;
  1245. case 13:
  1246. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1247. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1248. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1249. NUM_BANKS(ADDR_SURF_8_BANK));
  1250. break;
  1251. case 14:
  1252. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1253. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1254. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1255. NUM_BANKS(ADDR_SURF_4_BANK));
  1256. break;
  1257. default:
  1258. gb_tile_moden = 0;
  1259. break;
  1260. }
  1261. adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
  1262. WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
  1263. }
  1264. break;
  1265. case CHIP_HAWAII:
  1266. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  1267. switch (reg_offset) {
  1268. case 0:
  1269. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1270. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1271. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
  1272. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1273. break;
  1274. case 1:
  1275. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1276. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1277. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
  1278. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1279. break;
  1280. case 2:
  1281. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1282. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1283. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
  1284. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1285. break;
  1286. case 3:
  1287. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1288. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1289. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
  1290. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1291. break;
  1292. case 4:
  1293. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1294. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1295. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1296. TILE_SPLIT(split_equal_to_row_size));
  1297. break;
  1298. case 5:
  1299. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1300. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1301. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1302. TILE_SPLIT(split_equal_to_row_size));
  1303. break;
  1304. case 6:
  1305. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1306. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1307. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1308. TILE_SPLIT(split_equal_to_row_size));
  1309. break;
  1310. case 7:
  1311. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1312. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1313. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1314. TILE_SPLIT(split_equal_to_row_size));
  1315. break;
  1316. case 8:
  1317. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1318. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
  1319. break;
  1320. case 9:
  1321. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1322. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1323. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1324. break;
  1325. case 10:
  1326. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1327. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1328. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1329. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1330. break;
  1331. case 11:
  1332. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1333. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1334. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1335. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1336. break;
  1337. case 12:
  1338. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  1339. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1340. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1341. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1342. break;
  1343. case 13:
  1344. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1345. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1346. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1347. break;
  1348. case 14:
  1349. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1350. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1351. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1352. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1353. break;
  1354. case 15:
  1355. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
  1356. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1357. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1358. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1359. break;
  1360. case 16:
  1361. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1362. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1363. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1364. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1365. break;
  1366. case 17:
  1367. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1368. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1369. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1370. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1371. break;
  1372. case 18:
  1373. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1374. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1375. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1376. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1377. break;
  1378. case 19:
  1379. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1380. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1381. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
  1382. break;
  1383. case 20:
  1384. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1385. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1386. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1387. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1388. break;
  1389. case 21:
  1390. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
  1391. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1392. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1393. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1394. break;
  1395. case 22:
  1396. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
  1397. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1398. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1399. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1400. break;
  1401. case 23:
  1402. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
  1403. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1404. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1405. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1406. break;
  1407. case 24:
  1408. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1409. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1410. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1411. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1412. break;
  1413. case 25:
  1414. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
  1415. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1416. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1417. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1418. break;
  1419. case 26:
  1420. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
  1421. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1422. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1423. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1424. break;
  1425. case 27:
  1426. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1427. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1428. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  1429. break;
  1430. case 28:
  1431. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1432. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1433. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1434. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1435. break;
  1436. case 29:
  1437. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1438. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  1439. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1440. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1441. break;
  1442. case 30:
  1443. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1444. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  1445. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1446. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1447. break;
  1448. default:
  1449. gb_tile_moden = 0;
  1450. break;
  1451. }
  1452. adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
  1453. WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
  1454. }
  1455. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  1456. switch (reg_offset) {
  1457. case 0:
  1458. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1459. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1460. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1461. NUM_BANKS(ADDR_SURF_16_BANK));
  1462. break;
  1463. case 1:
  1464. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1465. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1466. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1467. NUM_BANKS(ADDR_SURF_16_BANK));
  1468. break;
  1469. case 2:
  1470. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1471. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1472. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1473. NUM_BANKS(ADDR_SURF_16_BANK));
  1474. break;
  1475. case 3:
  1476. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1477. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1478. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1479. NUM_BANKS(ADDR_SURF_16_BANK));
  1480. break;
  1481. case 4:
  1482. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1483. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1484. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1485. NUM_BANKS(ADDR_SURF_8_BANK));
  1486. break;
  1487. case 5:
  1488. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1489. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1490. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1491. NUM_BANKS(ADDR_SURF_4_BANK));
  1492. break;
  1493. case 6:
  1494. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1495. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1496. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1497. NUM_BANKS(ADDR_SURF_4_BANK));
  1498. break;
  1499. case 8:
  1500. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1501. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1502. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1503. NUM_BANKS(ADDR_SURF_16_BANK));
  1504. break;
  1505. case 9:
  1506. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1507. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1508. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1509. NUM_BANKS(ADDR_SURF_16_BANK));
  1510. break;
  1511. case 10:
  1512. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1513. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1514. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1515. NUM_BANKS(ADDR_SURF_16_BANK));
  1516. break;
  1517. case 11:
  1518. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1519. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1520. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1521. NUM_BANKS(ADDR_SURF_8_BANK));
  1522. break;
  1523. case 12:
  1524. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1525. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1526. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1527. NUM_BANKS(ADDR_SURF_16_BANK));
  1528. break;
  1529. case 13:
  1530. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1531. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1532. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1533. NUM_BANKS(ADDR_SURF_8_BANK));
  1534. break;
  1535. case 14:
  1536. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1537. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1538. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  1539. NUM_BANKS(ADDR_SURF_4_BANK));
  1540. break;
  1541. default:
  1542. gb_tile_moden = 0;
  1543. break;
  1544. }
  1545. adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
  1546. WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
  1547. }
  1548. break;
  1549. case CHIP_KABINI:
  1550. case CHIP_KAVERI:
  1551. case CHIP_MULLINS:
  1552. default:
  1553. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
  1554. switch (reg_offset) {
  1555. case 0:
  1556. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1557. PIPE_CONFIG(ADDR_SURF_P2) |
  1558. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
  1559. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1560. break;
  1561. case 1:
  1562. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1563. PIPE_CONFIG(ADDR_SURF_P2) |
  1564. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
  1565. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1566. break;
  1567. case 2:
  1568. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1569. PIPE_CONFIG(ADDR_SURF_P2) |
  1570. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
  1571. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1572. break;
  1573. case 3:
  1574. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1575. PIPE_CONFIG(ADDR_SURF_P2) |
  1576. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
  1577. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1578. break;
  1579. case 4:
  1580. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1581. PIPE_CONFIG(ADDR_SURF_P2) |
  1582. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1583. TILE_SPLIT(split_equal_to_row_size));
  1584. break;
  1585. case 5:
  1586. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1587. PIPE_CONFIG(ADDR_SURF_P2) |
  1588. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  1589. break;
  1590. case 6:
  1591. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1592. PIPE_CONFIG(ADDR_SURF_P2) |
  1593. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  1594. TILE_SPLIT(split_equal_to_row_size));
  1595. break;
  1596. case 7:
  1597. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1598. break;
  1599. case 8:
  1600. gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  1601. PIPE_CONFIG(ADDR_SURF_P2));
  1602. break;
  1603. case 9:
  1604. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1605. PIPE_CONFIG(ADDR_SURF_P2) |
  1606. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  1607. break;
  1608. case 10:
  1609. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1610. PIPE_CONFIG(ADDR_SURF_P2) |
  1611. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1612. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1613. break;
  1614. case 11:
  1615. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1616. PIPE_CONFIG(ADDR_SURF_P2) |
  1617. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  1618. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1619. break;
  1620. case 12:
  1621. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1622. break;
  1623. case 13:
  1624. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1625. PIPE_CONFIG(ADDR_SURF_P2) |
  1626. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  1627. break;
  1628. case 14:
  1629. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1630. PIPE_CONFIG(ADDR_SURF_P2) |
  1631. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1632. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1633. break;
  1634. case 15:
  1635. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
  1636. PIPE_CONFIG(ADDR_SURF_P2) |
  1637. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1638. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1639. break;
  1640. case 16:
  1641. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1642. PIPE_CONFIG(ADDR_SURF_P2) |
  1643. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1644. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1645. break;
  1646. case 17:
  1647. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1648. break;
  1649. case 18:
  1650. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1651. PIPE_CONFIG(ADDR_SURF_P2) |
  1652. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1653. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1654. break;
  1655. case 19:
  1656. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
  1657. PIPE_CONFIG(ADDR_SURF_P2) |
  1658. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
  1659. break;
  1660. case 20:
  1661. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1662. PIPE_CONFIG(ADDR_SURF_P2) |
  1663. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1664. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1665. break;
  1666. case 21:
  1667. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
  1668. PIPE_CONFIG(ADDR_SURF_P2) |
  1669. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1670. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1671. break;
  1672. case 22:
  1673. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
  1674. PIPE_CONFIG(ADDR_SURF_P2) |
  1675. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1676. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1677. break;
  1678. case 23:
  1679. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1680. break;
  1681. case 24:
  1682. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
  1683. PIPE_CONFIG(ADDR_SURF_P2) |
  1684. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  1685. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1686. break;
  1687. case 25:
  1688. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
  1689. PIPE_CONFIG(ADDR_SURF_P2) |
  1690. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1691. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1692. break;
  1693. case 26:
  1694. gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
  1695. PIPE_CONFIG(ADDR_SURF_P2) |
  1696. MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
  1697. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
  1698. break;
  1699. case 27:
  1700. gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  1701. PIPE_CONFIG(ADDR_SURF_P2) |
  1702. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  1703. break;
  1704. case 28:
  1705. gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  1706. PIPE_CONFIG(ADDR_SURF_P2) |
  1707. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1708. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  1709. break;
  1710. case 29:
  1711. gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  1712. PIPE_CONFIG(ADDR_SURF_P2) |
  1713. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  1714. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
  1715. break;
  1716. case 30:
  1717. gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
  1718. break;
  1719. default:
  1720. gb_tile_moden = 0;
  1721. break;
  1722. }
  1723. adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
  1724. WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
  1725. }
  1726. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
  1727. switch (reg_offset) {
  1728. case 0:
  1729. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1730. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1731. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1732. NUM_BANKS(ADDR_SURF_8_BANK));
  1733. break;
  1734. case 1:
  1735. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1736. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1737. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1738. NUM_BANKS(ADDR_SURF_8_BANK));
  1739. break;
  1740. case 2:
  1741. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1742. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1743. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1744. NUM_BANKS(ADDR_SURF_8_BANK));
  1745. break;
  1746. case 3:
  1747. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1748. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1749. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1750. NUM_BANKS(ADDR_SURF_8_BANK));
  1751. break;
  1752. case 4:
  1753. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1754. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1755. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1756. NUM_BANKS(ADDR_SURF_8_BANK));
  1757. break;
  1758. case 5:
  1759. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1760. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1761. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1762. NUM_BANKS(ADDR_SURF_8_BANK));
  1763. break;
  1764. case 6:
  1765. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1766. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1767. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1768. NUM_BANKS(ADDR_SURF_8_BANK));
  1769. break;
  1770. case 8:
  1771. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  1772. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  1773. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1774. NUM_BANKS(ADDR_SURF_16_BANK));
  1775. break;
  1776. case 9:
  1777. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  1778. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1779. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1780. NUM_BANKS(ADDR_SURF_16_BANK));
  1781. break;
  1782. case 10:
  1783. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  1784. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  1785. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1786. NUM_BANKS(ADDR_SURF_16_BANK));
  1787. break;
  1788. case 11:
  1789. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  1790. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1791. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1792. NUM_BANKS(ADDR_SURF_16_BANK));
  1793. break;
  1794. case 12:
  1795. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1796. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  1797. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1798. NUM_BANKS(ADDR_SURF_16_BANK));
  1799. break;
  1800. case 13:
  1801. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1802. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1803. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  1804. NUM_BANKS(ADDR_SURF_16_BANK));
  1805. break;
  1806. case 14:
  1807. gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  1808. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  1809. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  1810. NUM_BANKS(ADDR_SURF_8_BANK));
  1811. break;
  1812. default:
  1813. gb_tile_moden = 0;
  1814. break;
  1815. }
  1816. adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
  1817. WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
  1818. }
  1819. break;
  1820. }
  1821. }
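/*
 * Illustrative sketch (not part of the original driver): each GB_TILE_MODEn
 * and GB_MACROTILE_MODEn value built above is simply an OR of the field
 * macros (ARRAY_MODE, PIPE_CONFIG, TILE_SPLIT, MICRO_TILE_MODE_NEW,
 * BANK_WIDTH, ...), cached in adev->gfx.config and mirrored to the register
 * block. A hypothetical helper following the same cache-then-write pattern:
 */
#if 0 /* example only */
static void gfx_v7_0_write_tile_mode(struct amdgpu_device *adev,
u32 reg_offset, u32 gb_tile_moden)
{
/* keep the software copy and the hardware register in sync */
adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
}
#endif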
  1822. /**
  1823. * gfx_v7_0_select_se_sh - select which SE, SH to address
  1824. *
  1825. * @adev: amdgpu_device pointer
  1826. * @se_num: shader engine to address
  1827. * @sh_num: sh block to address
  1828. *
  1829. * Select which SE, SH combinations to address. Certain
  1830. * registers are instanced per SE or SH. 0xffffffff means
  1831. * broadcast to all SEs or SHs (CIK).
  1832. */
  1833. void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
  1834. {
  1835. u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
  1836. if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
  1837. data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
  1838. GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
  1839. else if (se_num == 0xffffffff)
  1840. data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
  1841. (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
  1842. else if (sh_num == 0xffffffff)
  1843. data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
  1844. (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
  1845. else
  1846. data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
  1847. (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
  1848. WREG32(mmGRBM_GFX_INDEX, data);
  1849. }
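/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * such as gfx_v7_0_setup_rb() below select each SE/SH instance in turn while
 * holding adev->grbm_idx_mutex, access the instanced registers, and then
 * restore broadcast mode before releasing the mutex:
 */
#if 0 /* example only */
mutex_lock(&adev->grbm_idx_mutex);
for (se = 0; se < adev->gfx.config.max_shader_engines; se++) {
for (sh = 0; sh < adev->gfx.config.max_sh_per_se; sh++) {
gfx_v7_0_select_se_sh(adev, se, sh);
/* read or program per-SE/SH instanced registers here */
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
#endif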
  1850. /**
  1851. * gfx_v7_0_create_bitmask - create a bitmask
  1852. *
  1853. * @bit_width: length of the mask
  1854. *
  1855. * create a variable length bit mask (CIK).
  1856. * Returns the bitmask.
  1857. */
  1858. static u32 gfx_v7_0_create_bitmask(u32 bit_width)
  1859. {
  1860. u32 i, mask = 0;
  1861. for (i = 0; i < bit_width; i++) {
  1862. mask <<= 1;
  1863. mask |= 1;
  1864. }
  1865. return mask;
  1866. }
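/*
 * Note (illustrative): the loop above builds a right-aligned mask of
 * bit_width ones, e.g. bit_width = 4 -> 0xf, bit_width = 0 -> 0. For
 * bit_width < 32 this is equivalent to ((1U << bit_width) - 1); the loop
 * form also yields 0xffffffff for bit_width == 32, where the shift
 * expression would be undefined behaviour in C.
 */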
  1867. /**
  1868. * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
  1869. *
  1870. * @adev: amdgpu_device pointer
1871. * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
1873. * @sh_per_se: number of SH blocks per SE for the asic
  1874. *
  1875. * Calculates the bitmask of disabled RBs (CIK).
  1876. * Returns the disabled RB bitmask.
  1877. */
  1878. static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
  1879. u32 max_rb_num_per_se,
  1880. u32 sh_per_se)
  1881. {
  1882. u32 data, mask;
  1883. data = RREG32(mmCC_RB_BACKEND_DISABLE);
  1884. if (data & 1)
  1885. data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
  1886. else
  1887. data = 0;
  1888. data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
  1889. data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
  1890. mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);
  1891. return data & mask;
  1892. }
  1893. /**
  1894. * gfx_v7_0_setup_rb - setup the RBs on the asic
  1895. *
  1896. * @adev: amdgpu_device pointer
  1897. * @se_num: number of SEs (shader engines) for the asic
  1898. * @sh_per_se: number of SH blocks per SE for the asic
1899. * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
  1900. *
  1901. * Configures per-SE/SH RB registers (CIK).
  1902. */
  1903. static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
  1904. u32 se_num, u32 sh_per_se,
  1905. u32 max_rb_num_per_se)
  1906. {
  1907. int i, j;
  1908. u32 data, mask;
  1909. u32 disabled_rbs = 0;
  1910. u32 enabled_rbs = 0;
  1911. mutex_lock(&adev->grbm_idx_mutex);
  1912. for (i = 0; i < se_num; i++) {
  1913. for (j = 0; j < sh_per_se; j++) {
  1914. gfx_v7_0_select_se_sh(adev, i, j);
  1915. data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
  1916. if (adev->asic_type == CHIP_HAWAII)
  1917. disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
  1918. else
  1919. disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
  1920. }
  1921. }
  1922. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  1923. mutex_unlock(&adev->grbm_idx_mutex);
  1924. mask = 1;
  1925. for (i = 0; i < max_rb_num_per_se * se_num; i++) {
  1926. if (!(disabled_rbs & mask))
  1927. enabled_rbs |= mask;
  1928. mask <<= 1;
  1929. }
  1930. adev->gfx.config.backend_enable_mask = enabled_rbs;
  1931. mutex_lock(&adev->grbm_idx_mutex);
  1932. for (i = 0; i < se_num; i++) {
  1933. gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
  1934. data = 0;
  1935. for (j = 0; j < sh_per_se; j++) {
  1936. switch (enabled_rbs & 3) {
  1937. case 0:
  1938. if (j == 0)
  1939. data |= (RASTER_CONFIG_RB_MAP_3 <<
  1940. PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
  1941. else
  1942. data |= (RASTER_CONFIG_RB_MAP_0 <<
  1943. PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
  1944. break;
  1945. case 1:
  1946. data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
  1947. break;
  1948. case 2:
  1949. data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
  1950. break;
  1951. case 3:
  1952. default:
  1953. data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
  1954. break;
  1955. }
  1956. enabled_rbs >>= 2;
  1957. }
  1958. WREG32(mmPA_SC_RASTER_CONFIG, data);
  1959. }
  1960. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  1961. mutex_unlock(&adev->grbm_idx_mutex);
  1962. }
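/*
 * Illustrative note: the enabled_rbs computation above is the complement of
 * disabled_rbs restricted to the low (max_rb_num_per_se * se_num) bits. For
 * example, 2 SEs with 2 RBs each and disabled_rbs = 0b0100 gives
 * enabled_rbs = 0b1011. A hypothetical equivalent one-liner:
 */
#if 0 /* example only */
enabled_rbs = ~disabled_rbs & gfx_v7_0_create_bitmask(max_rb_num_per_se * se_num);
#endif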
  1963. /**
1964. * gmc_v7_0_init_compute_vmid - initialize the compute VMIDs
1965. *
1966. * @adev: amdgpu_device pointer
1967. *
1968. * Initialize the SH_MEM registers for the compute VMIDs (8-15).
  1969. *
  1970. */
  1971. #define DEFAULT_SH_MEM_BASES (0x6000)
  1972. #define FIRST_COMPUTE_VMID (8)
  1973. #define LAST_COMPUTE_VMID (16)
  1974. static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
  1975. {
  1976. int i;
  1977. uint32_t sh_mem_config;
  1978. uint32_t sh_mem_bases;
  1979. /*
  1980. * Configure apertures:
  1981. * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
  1982. * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
  1983. * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
  1984. */
  1985. sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
  1986. sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
  1987. SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
  1988. sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
  1989. mutex_lock(&adev->srbm_mutex);
  1990. for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
  1991. cik_srbm_select(adev, 0, 0, 0, i);
  1992. /* CP and shaders */
  1993. WREG32(mmSH_MEM_CONFIG, sh_mem_config);
  1994. WREG32(mmSH_MEM_APE1_BASE, 1);
  1995. WREG32(mmSH_MEM_APE1_LIMIT, 0);
  1996. WREG32(mmSH_MEM_BASES, sh_mem_bases);
  1997. }
  1998. cik_srbm_select(adev, 0, 0, 0, 0);
  1999. mutex_unlock(&adev->srbm_mutex);
  2000. }
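/*
 * Illustrative note on the SH_MEM_BASES value programmed above: with
 * DEFAULT_SH_MEM_BASES = 0x6000 the register becomes
 *
 * sh_mem_bases = 0x6000 | (0x6000 << 16) = 0x60006000
 *
 * i.e. both 16-bit fields (private and shared aperture bases) hold 0x6000,
 * which, going by the aperture comment in the function, corresponds to the
 * upper address bits of the 0x6000xxxx'xxxxxxxx FSA64 range used for LDS,
 * scratch and GPUVM.
 */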
  2001. /**
  2002. * gfx_v7_0_gpu_init - setup the 3D engine
  2003. *
  2004. * @adev: amdgpu_device pointer
  2005. *
  2006. * Configures the 3D engine and tiling configuration
  2007. * registers so that the 3D engine is usable.
  2008. */
  2009. static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
  2010. {
  2011. u32 gb_addr_config;
  2012. u32 mc_shared_chmap, mc_arb_ramcfg;
  2013. u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
  2014. u32 sh_mem_cfg;
  2015. u32 tmp;
  2016. int i;
  2017. switch (adev->asic_type) {
  2018. case CHIP_BONAIRE:
  2019. adev->gfx.config.max_shader_engines = 2;
  2020. adev->gfx.config.max_tile_pipes = 4;
  2021. adev->gfx.config.max_cu_per_sh = 7;
  2022. adev->gfx.config.max_sh_per_se = 1;
  2023. adev->gfx.config.max_backends_per_se = 2;
  2024. adev->gfx.config.max_texture_channel_caches = 4;
  2025. adev->gfx.config.max_gprs = 256;
  2026. adev->gfx.config.max_gs_threads = 32;
  2027. adev->gfx.config.max_hw_contexts = 8;
  2028. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  2029. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  2030. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  2031. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  2032. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2033. break;
  2034. case CHIP_HAWAII:
  2035. adev->gfx.config.max_shader_engines = 4;
  2036. adev->gfx.config.max_tile_pipes = 16;
  2037. adev->gfx.config.max_cu_per_sh = 11;
  2038. adev->gfx.config.max_sh_per_se = 1;
  2039. adev->gfx.config.max_backends_per_se = 4;
  2040. adev->gfx.config.max_texture_channel_caches = 16;
  2041. adev->gfx.config.max_gprs = 256;
  2042. adev->gfx.config.max_gs_threads = 32;
  2043. adev->gfx.config.max_hw_contexts = 8;
  2044. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  2045. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  2046. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  2047. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  2048. gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
  2049. break;
  2050. case CHIP_KAVERI:
  2051. adev->gfx.config.max_shader_engines = 1;
  2052. adev->gfx.config.max_tile_pipes = 4;
  2053. if ((adev->pdev->device == 0x1304) ||
  2054. (adev->pdev->device == 0x1305) ||
  2055. (adev->pdev->device == 0x130C) ||
  2056. (adev->pdev->device == 0x130F) ||
  2057. (adev->pdev->device == 0x1310) ||
  2058. (adev->pdev->device == 0x1311) ||
  2059. (adev->pdev->device == 0x131C)) {
  2060. adev->gfx.config.max_cu_per_sh = 8;
  2061. adev->gfx.config.max_backends_per_se = 2;
  2062. } else if ((adev->pdev->device == 0x1309) ||
  2063. (adev->pdev->device == 0x130A) ||
  2064. (adev->pdev->device == 0x130D) ||
  2065. (adev->pdev->device == 0x1313) ||
  2066. (adev->pdev->device == 0x131D)) {
  2067. adev->gfx.config.max_cu_per_sh = 6;
  2068. adev->gfx.config.max_backends_per_se = 2;
  2069. } else if ((adev->pdev->device == 0x1306) ||
  2070. (adev->pdev->device == 0x1307) ||
  2071. (adev->pdev->device == 0x130B) ||
  2072. (adev->pdev->device == 0x130E) ||
  2073. (adev->pdev->device == 0x1315) ||
  2074. (adev->pdev->device == 0x131B)) {
  2075. adev->gfx.config.max_cu_per_sh = 4;
  2076. adev->gfx.config.max_backends_per_se = 1;
  2077. } else {
  2078. adev->gfx.config.max_cu_per_sh = 3;
  2079. adev->gfx.config.max_backends_per_se = 1;
  2080. }
  2081. adev->gfx.config.max_sh_per_se = 1;
  2082. adev->gfx.config.max_texture_channel_caches = 4;
  2083. adev->gfx.config.max_gprs = 256;
  2084. adev->gfx.config.max_gs_threads = 16;
  2085. adev->gfx.config.max_hw_contexts = 8;
  2086. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  2087. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  2088. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  2089. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  2090. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2091. break;
  2092. case CHIP_KABINI:
  2093. case CHIP_MULLINS:
  2094. default:
  2095. adev->gfx.config.max_shader_engines = 1;
  2096. adev->gfx.config.max_tile_pipes = 2;
  2097. adev->gfx.config.max_cu_per_sh = 2;
  2098. adev->gfx.config.max_sh_per_se = 1;
  2099. adev->gfx.config.max_backends_per_se = 1;
  2100. adev->gfx.config.max_texture_channel_caches = 2;
  2101. adev->gfx.config.max_gprs = 256;
  2102. adev->gfx.config.max_gs_threads = 16;
  2103. adev->gfx.config.max_hw_contexts = 8;
  2104. adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
  2105. adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
  2106. adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
  2107. adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
  2108. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  2109. break;
  2110. }
  2111. WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
  2112. mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
  2113. adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
  2114. mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
  2115. adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
  2116. adev->gfx.config.mem_max_burst_length_bytes = 256;
  2117. if (adev->flags & AMD_IS_APU) {
  2118. /* Get memory bank mapping mode. */
  2119. tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
  2120. dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
  2121. dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
  2122. tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
  2123. dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
  2124. dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
  2125. /* Validate settings in case only one DIMM installed. */
  2126. if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
  2127. dimm00_addr_map = 0;
  2128. if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
  2129. dimm01_addr_map = 0;
  2130. if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
  2131. dimm10_addr_map = 0;
  2132. if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
  2133. dimm11_addr_map = 0;
  2134. /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
2135. /* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
  2136. if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
  2137. adev->gfx.config.mem_row_size_in_kb = 2;
  2138. else
  2139. adev->gfx.config.mem_row_size_in_kb = 1;
  2140. } else {
  2141. tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
  2142. adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
  2143. if (adev->gfx.config.mem_row_size_in_kb > 4)
  2144. adev->gfx.config.mem_row_size_in_kb = 4;
  2145. }
  2146. /* XXX use MC settings? */
  2147. adev->gfx.config.shader_engine_tile_size = 32;
  2148. adev->gfx.config.num_gpus = 1;
  2149. adev->gfx.config.multi_gpu_tile_size = 64;
  2150. /* fix up row size */
  2151. gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
  2152. switch (adev->gfx.config.mem_row_size_in_kb) {
  2153. case 1:
  2154. default:
  2155. gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
  2156. break;
  2157. case 2:
  2158. gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
  2159. break;
  2160. case 4:
  2161. gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
  2162. break;
  2163. }
  2164. adev->gfx.config.gb_addr_config = gb_addr_config;
  2165. WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
  2166. WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
  2167. WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
  2168. WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
  2169. WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
  2170. WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
  2171. WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
  2172. WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
  2173. gfx_v7_0_tiling_mode_table_init(adev);
  2174. gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
  2175. adev->gfx.config.max_sh_per_se,
  2176. adev->gfx.config.max_backends_per_se);
  2177. /* set HW defaults for 3D engine */
  2178. WREG32(mmCP_MEQ_THRESHOLDS,
  2179. (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
  2180. (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
  2181. mutex_lock(&adev->grbm_idx_mutex);
  2182. /*
  2183. * making sure that the following register writes will be broadcasted
  2184. * to all the shaders
  2185. */
  2186. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  2187. /* XXX SH_MEM regs */
  2188. /* where to put LDS, scratch, GPUVM in FSA64 space */
  2189. sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
  2190. SH_MEM_ALIGNMENT_MODE_UNALIGNED);
  2191. mutex_lock(&adev->srbm_mutex);
  2192. for (i = 0; i < 16; i++) {
  2193. cik_srbm_select(adev, 0, 0, 0, i);
  2194. /* CP and shaders */
  2195. WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
  2196. WREG32(mmSH_MEM_APE1_BASE, 1);
  2197. WREG32(mmSH_MEM_APE1_LIMIT, 0);
  2198. WREG32(mmSH_MEM_BASES, 0);
  2199. }
  2200. cik_srbm_select(adev, 0, 0, 0, 0);
  2201. mutex_unlock(&adev->srbm_mutex);
  2202. gmc_v7_0_init_compute_vmid(adev);
  2203. WREG32(mmSX_DEBUG_1, 0x20);
  2204. WREG32(mmTA_CNTL_AUX, 0x00010000);
  2205. tmp = RREG32(mmSPI_CONFIG_CNTL);
  2206. tmp |= 0x03000000;
  2207. WREG32(mmSPI_CONFIG_CNTL, tmp);
  2208. WREG32(mmSQ_CONFIG, 1);
  2209. WREG32(mmDB_DEBUG, 0);
  2210. tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
  2211. tmp |= 0x00000400;
  2212. WREG32(mmDB_DEBUG2, tmp);
  2213. tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
  2214. tmp |= 0x00020200;
  2215. WREG32(mmDB_DEBUG3, tmp);
  2216. tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
  2217. tmp |= 0x00018208;
  2218. WREG32(mmCB_HW_CONTROL, tmp);
  2219. WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
  2220. WREG32(mmPA_SC_FIFO_SIZE,
  2221. ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
  2222. (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
  2223. (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
  2224. (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
  2225. WREG32(mmVGT_NUM_INSTANCES, 1);
  2226. WREG32(mmCP_PERFMON_CNTL, 0);
  2227. WREG32(mmSQ_CONFIG, 0);
  2228. WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
  2229. ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
  2230. (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
  2231. WREG32(mmVGT_CACHE_INVALIDATION,
  2232. (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
  2233. (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
  2234. WREG32(mmVGT_GS_VERTEX_REUSE, 16);
  2235. WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
  2236. WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
  2237. (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
  2238. WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
  2239. mutex_unlock(&adev->grbm_idx_mutex);
  2240. udelay(50);
  2241. }
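/*
 * Worked example for the row-size setup in gfx_v7_0_gpu_init() above
 * (dGPU path): with NOOFCOLS = n read from MC_ARB_RAMCFG,
 *
 * mem_row_size_in_kb = (4 * (1 << (8 + n))) / 1024  ->  1, 2, 4 KB
 * for n = 0, 1, 2 (and clamped to 4 KB for larger n),
 *
 * and the switch statement then encodes 1/2/4 KB as ROW_SIZE = 0/1/2 in
 * GB_ADDR_CONFIG.
 */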
  2242. /*
  2243. * GPU scratch registers helpers function.
  2244. */
  2245. /**
  2246. * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
  2247. *
  2248. * @adev: amdgpu_device pointer
  2249. *
  2250. * Set up the number and offset of the CP scratch registers.
2251. * NOTE: use of CP scratch registers is a legacy interface and
  2252. * is not used by default on newer asics (r6xx+). On newer asics,
  2253. * memory buffers are used for fences rather than scratch regs.
  2254. */
  2255. static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
  2256. {
  2257. int i;
  2258. adev->gfx.scratch.num_reg = 7;
  2259. adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
  2260. for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
  2261. adev->gfx.scratch.free[i] = true;
  2262. adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
  2263. }
  2264. }
  2265. /**
  2266. * gfx_v7_0_ring_test_ring - basic gfx ring test
  2267. *
2269. * @ring: amdgpu_ring structure holding ring information
  2270. *
  2271. * Allocate a scratch register and write to it using the gfx ring (CIK).
  2272. * Provides a basic gfx ring test to verify that the ring is working.
2273. * Used by gfx_v7_0_cp_gfx_resume().
  2274. * Returns 0 on success, error on failure.
  2275. */
  2276. static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
  2277. {
  2278. struct amdgpu_device *adev = ring->adev;
  2279. uint32_t scratch;
  2280. uint32_t tmp = 0;
  2281. unsigned i;
  2282. int r;
  2283. r = amdgpu_gfx_scratch_get(adev, &scratch);
  2284. if (r) {
  2285. DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
  2286. return r;
  2287. }
  2288. WREG32(scratch, 0xCAFEDEAD);
  2289. r = amdgpu_ring_lock(ring, 3);
  2290. if (r) {
  2291. DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
  2292. amdgpu_gfx_scratch_free(adev, scratch);
  2293. return r;
  2294. }
  2295. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  2296. amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
  2297. amdgpu_ring_write(ring, 0xDEADBEEF);
  2298. amdgpu_ring_unlock_commit(ring);
  2299. for (i = 0; i < adev->usec_timeout; i++) {
  2300. tmp = RREG32(scratch);
  2301. if (tmp == 0xDEADBEEF)
  2302. break;
  2303. DRM_UDELAY(1);
  2304. }
  2305. if (i < adev->usec_timeout) {
  2306. DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
  2307. } else {
  2308. DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
  2309. ring->idx, scratch, tmp);
  2310. r = -EINVAL;
  2311. }
  2312. amdgpu_gfx_scratch_free(adev, scratch);
  2313. return r;
  2314. }
  2315. /**
2316. * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2317. *
2318. * @ring: amdgpu_ring structure holding ring information
  2320. *
  2321. * Emits an hdp flush on the cp.
  2322. */
  2323. static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  2324. {
  2325. u32 ref_and_mask;
  2326. int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
  2327. if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
  2328. switch (ring->me) {
  2329. case 1:
  2330. ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
  2331. break;
  2332. case 2:
  2333. ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
  2334. break;
  2335. default:
  2336. return;
  2337. }
  2338. } else {
  2339. ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
  2340. }
  2341. amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  2342. amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
  2343. WAIT_REG_MEM_FUNCTION(3) | /* == */
  2344. WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
  2345. amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
  2346. amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
  2347. amdgpu_ring_write(ring, ref_and_mask);
  2348. amdgpu_ring_write(ring, ref_and_mask);
  2349. amdgpu_ring_write(ring, 0x20); /* poll interval */
  2350. }
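/*
 * Illustrative note: per the inline comments above, the WAIT_REG_MEM packet
 * writes ref_and_mask to GPU_HDP_FLUSH_REQ and waits for the matching bits
 * in GPU_HDP_FLUSH_DONE (FUNCTION(3), i.e. "equal"), using the PFP on the
 * gfx ring and the ME on compute rings via WAIT_REG_MEM_ENGINE(usepfp).
 * Each compute pipe gets its own DONE bit, obtained by shifting the CP2
 * (ME1) or CP6 (ME2) mask by the pipe index.
 */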
  2351. /**
  2352. * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
  2353. *
2354. * @addr: GPU address of the fence
2355. * @seq: fence sequence number
2356. *
2357. * Emits a fence sequence number on the gfx ring and flushes
  2358. * GPU caches.
  2359. */
  2360. static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  2361. u64 seq, unsigned flags)
  2362. {
  2363. bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
  2364. bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
  2365. /* Workaround for cache flush problems. First send a dummy EOP
  2366. * event down the pipe with seq one below.
  2367. */
  2368. amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  2369. amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
  2370. EOP_TC_ACTION_EN |
  2371. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  2372. EVENT_INDEX(5)));
  2373. amdgpu_ring_write(ring, addr & 0xfffffffc);
  2374. amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
  2375. DATA_SEL(1) | INT_SEL(0));
  2376. amdgpu_ring_write(ring, lower_32_bits(seq - 1));
  2377. amdgpu_ring_write(ring, upper_32_bits(seq - 1));
  2378. /* Then send the real EOP event down the pipe. */
  2379. amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  2380. amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
  2381. EOP_TC_ACTION_EN |
  2382. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  2383. EVENT_INDEX(5)));
  2384. amdgpu_ring_write(ring, addr & 0xfffffffc);
  2385. amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
  2386. DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
  2387. amdgpu_ring_write(ring, lower_32_bits(seq));
  2388. amdgpu_ring_write(ring, upper_32_bits(seq));
  2389. }
  2390. /**
  2391. * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
  2392. *
2393. * @addr: GPU address of the fence
2394. * @seq: fence sequence number
2395. *
2396. * Emits a fence sequence number on the compute ring and flushes
  2397. * GPU caches.
  2398. */
  2399. static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  2400. u64 addr, u64 seq,
  2401. unsigned flags)
  2402. {
  2403. bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
  2404. bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
  2405. /* RELEASE_MEM - flush caches, send int */
  2406. amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
  2407. amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
  2408. EOP_TC_ACTION_EN |
  2409. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  2410. EVENT_INDEX(5)));
  2411. amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
  2412. amdgpu_ring_write(ring, addr & 0xfffffffc);
  2413. amdgpu_ring_write(ring, upper_32_bits(addr));
  2414. amdgpu_ring_write(ring, lower_32_bits(seq));
  2415. amdgpu_ring_write(ring, upper_32_bits(seq));
  2416. }
  2417. /**
  2418. * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
  2419. *
  2420. * @ring: amdgpu ring buffer object
  2421. * @semaphore: amdgpu semaphore object
2422. * @emit_wait: Is this a semaphore wait?
  2423. *
  2424. * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
  2425. * from running ahead of semaphore waits.
  2426. */
  2427. static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  2428. struct amdgpu_semaphore *semaphore,
  2429. bool emit_wait)
  2430. {
  2431. uint64_t addr = semaphore->gpu_addr;
  2432. unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
  2433. amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
  2434. amdgpu_ring_write(ring, addr & 0xffffffff);
  2435. amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
  2436. if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
  2437. /* Prevent the PFP from running ahead of the semaphore wait */
  2438. amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  2439. amdgpu_ring_write(ring, 0x0);
  2440. }
  2441. return true;
  2442. }
  2443. /*
  2444. * IB stuff
  2445. */
  2446. /**
2447. * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the gfx ring
  2448. *
  2449. * @ring: amdgpu_ring structure holding ring information
  2450. * @ib: amdgpu indirect buffer object
  2451. *
2452. * Emits a DE (drawing engine) or CE (constant engine) IB
  2453. * on the gfx ring. IBs are usually generated by userspace
  2454. * acceleration drivers and submitted to the kernel for
2455. * scheduling on the ring. This function schedules the IB
  2456. * on the gfx ring for execution by the GPU.
  2457. */
  2458. static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
  2459. struct amdgpu_ib *ib)
  2460. {
  2461. bool need_ctx_switch = ring->current_ctx != ib->ctx;
  2462. u32 header, control = 0;
  2463. u32 next_rptr = ring->wptr + 5;
  2464. /* drop the CE preamble IB for the same context */
  2465. if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
  2466. return;
  2467. if (need_ctx_switch)
  2468. next_rptr += 2;
  2469. next_rptr += 4;
  2470. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  2471. amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
  2472. amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
  2473. amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
  2474. amdgpu_ring_write(ring, next_rptr);
  2475. /* insert SWITCH_BUFFER packet before first IB in the ring frame */
  2476. if (need_ctx_switch) {
  2477. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  2478. amdgpu_ring_write(ring, 0);
  2479. }
  2480. if (ib->flags & AMDGPU_IB_FLAG_CE)
  2481. header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
  2482. else
  2483. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  2484. control |= ib->length_dw |
  2485. (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
  2486. amdgpu_ring_write(ring, header);
  2487. amdgpu_ring_write(ring,
  2488. #ifdef __BIG_ENDIAN
  2489. (2 << 0) |
  2490. #endif
  2491. (ib->gpu_addr & 0xFFFFFFFC));
  2492. amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  2493. amdgpu_ring_write(ring, control);
  2494. }
  2495. static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
  2496. struct amdgpu_ib *ib)
  2497. {
  2498. u32 header, control = 0;
  2499. u32 next_rptr = ring->wptr + 5;
  2500. control |= INDIRECT_BUFFER_VALID;
  2501. next_rptr += 4;
  2502. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  2503. amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
  2504. amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
  2505. amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
  2506. amdgpu_ring_write(ring, next_rptr);
  2507. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  2508. control |= ib->length_dw |
  2509. (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
  2510. amdgpu_ring_write(ring, header);
  2511. amdgpu_ring_write(ring,
  2512. #ifdef __BIG_ENDIAN
  2513. (2 << 0) |
  2514. #endif
  2515. (ib->gpu_addr & 0xFFFFFFFC));
  2516. amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  2517. amdgpu_ring_write(ring, control);
  2518. }
  2519. /**
  2520. * gfx_v7_0_ring_test_ib - basic ring IB test
  2521. *
  2522. * @ring: amdgpu_ring structure holding ring information
  2523. *
  2524. * Allocate an IB and execute it on the gfx ring (CIK).
  2525. * Provides a basic gfx ring test to verify that IBs are working.
  2526. * Returns 0 on success, error on failure.
  2527. */
  2528. static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
  2529. {
  2530. struct amdgpu_device *adev = ring->adev;
  2531. struct amdgpu_ib ib;
  2532. struct fence *f = NULL;
  2533. uint32_t scratch;
  2534. uint32_t tmp = 0;
  2535. unsigned i;
  2536. int r;
  2537. r = amdgpu_gfx_scratch_get(adev, &scratch);
  2538. if (r) {
  2539. DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
  2540. return r;
  2541. }
  2542. WREG32(scratch, 0xCAFEDEAD);
  2543. memset(&ib, 0, sizeof(ib));
  2544. r = amdgpu_ib_get(ring, NULL, 256, &ib);
  2545. if (r) {
  2546. DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
  2547. goto err1;
  2548. }
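/* the test IB writes 0xDEADBEEF into the scratch register via SET_UCONFIG_REG;
 * the polling loop below then checks that the write landed.
 */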
  2549. ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
  2550. ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
  2551. ib.ptr[2] = 0xDEADBEEF;
  2552. ib.length_dw = 3;
  2553. r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
  2554. AMDGPU_FENCE_OWNER_UNDEFINED,
  2555. &f);
  2556. if (r)
  2557. goto err2;
  2558. r = fence_wait(f, false);
  2559. if (r) {
  2560. DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
  2561. goto err2;
  2562. }
  2563. for (i = 0; i < adev->usec_timeout; i++) {
  2564. tmp = RREG32(scratch);
  2565. if (tmp == 0xDEADBEEF)
  2566. break;
  2567. DRM_UDELAY(1);
  2568. }
  2569. if (i < adev->usec_timeout) {
  2570. DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
  2571. ring->idx, i);
  2572. goto err2;
  2573. } else {
  2574. DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
  2575. scratch, tmp);
  2576. r = -EINVAL;
  2577. }
  2578. err2:
  2579. fence_put(f);
  2580. amdgpu_ib_free(adev, &ib);
  2581. err1:
  2582. amdgpu_gfx_scratch_free(adev, scratch);
  2583. return r;
  2584. }
  2585. /*
  2586. * CP.
2587. * On CIK, gfx and compute now have independent command processors.
  2588. *
  2589. * GFX
  2590. * Gfx consists of a single ring and can process both gfx jobs and
  2591. * compute jobs. The gfx CP consists of three microengines (ME):
  2592. * PFP - Pre-Fetch Parser
  2593. * ME - Micro Engine
  2594. * CE - Constant Engine
  2595. * The PFP and ME make up what is considered the Drawing Engine (DE).
2596. * The CE is an asynchronous engine used for updating buffer descriptors
  2597. * used by the DE so that they can be loaded into cache in parallel
  2598. * while the DE is processing state update packets.
  2599. *
  2600. * Compute
  2601. * The compute CP consists of two microengines (ME):
  2602. * MEC1 - Compute MicroEngine 1
  2603. * MEC2 - Compute MicroEngine 2
  2604. * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
  2605. * The queues are exposed to userspace and are programmed directly
  2606. * by the compute runtime.
  2607. */
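/* Compute queues are addressed by an (me, pipe, queue) tuple selected via
 * cik_srbm_select() before the per-queue HQD registers are programmed;
 * see gfx_v7_0_cp_compute_resume() below.
 */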
  2608. /**
  2609. * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
  2610. *
  2611. * @adev: amdgpu_device pointer
  2612. * @enable: enable or disable the MEs
  2613. *
  2614. * Halts or unhalts the gfx MEs.
  2615. */
  2616. static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
  2617. {
  2618. int i;
  2619. if (enable) {
  2620. WREG32(mmCP_ME_CNTL, 0);
  2621. } else {
  2622. WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
  2623. for (i = 0; i < adev->gfx.num_gfx_rings; i++)
  2624. adev->gfx.gfx_ring[i].ready = false;
  2625. }
  2626. udelay(50);
  2627. }
  2628. /**
  2629. * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
  2630. *
  2631. * @adev: amdgpu_device pointer
  2632. *
  2633. * Loads the gfx PFP, ME, and CE ucode.
  2634. * Returns 0 for success, -EINVAL if the ucode is not available.
  2635. */
  2636. static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
  2637. {
  2638. const struct gfx_firmware_header_v1_0 *pfp_hdr;
  2639. const struct gfx_firmware_header_v1_0 *ce_hdr;
  2640. const struct gfx_firmware_header_v1_0 *me_hdr;
  2641. const __le32 *fw_data;
  2642. unsigned i, fw_size;
  2643. if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
  2644. return -EINVAL;
  2645. pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
  2646. ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
  2647. me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
  2648. amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
  2649. amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
  2650. amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
  2651. adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
  2652. adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
  2653. adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
  2654. adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
  2655. adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
  2656. adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
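/* halt the gfx CP before loading new microcode; each engine's ucode is then
 * streamed through its UCODE_ADDR/UCODE_DATA (ME_RAM_WADDR/ME_RAM_DATA for the
 * ME) register pair.
 */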
  2657. gfx_v7_0_cp_gfx_enable(adev, false);
  2658. /* PFP */
  2659. fw_data = (const __le32 *)
  2660. (adev->gfx.pfp_fw->data +
  2661. le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
  2662. fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
  2663. WREG32(mmCP_PFP_UCODE_ADDR, 0);
  2664. for (i = 0; i < fw_size; i++)
  2665. WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
  2666. WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
  2667. /* CE */
  2668. fw_data = (const __le32 *)
  2669. (adev->gfx.ce_fw->data +
  2670. le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
  2671. fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
  2672. WREG32(mmCP_CE_UCODE_ADDR, 0);
  2673. for (i = 0; i < fw_size; i++)
  2674. WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
  2675. WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
  2676. /* ME */
  2677. fw_data = (const __le32 *)
  2678. (adev->gfx.me_fw->data +
  2679. le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
  2680. fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
  2681. WREG32(mmCP_ME_RAM_WADDR, 0);
  2682. for (i = 0; i < fw_size; i++)
  2683. WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
  2684. WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
  2685. return 0;
  2686. }
  2687. /**
  2688. * gfx_v7_0_cp_gfx_start - start the gfx ring
  2689. *
  2690. * @adev: amdgpu_device pointer
  2691. *
  2692. * Enables the ring and loads the clear state context and other
  2693. * packets required to init the ring.
  2694. * Returns 0 for success, error for failure.
  2695. */
  2696. static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
  2697. {
  2698. struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
  2699. const struct cs_section_def *sect = NULL;
  2700. const struct cs_extent_def *ext = NULL;
  2701. int r, i;
  2702. /* init the CP */
  2703. WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
  2704. WREG32(mmCP_ENDIAN_SWAP, 0);
  2705. WREG32(mmCP_DEVICE_ID, 1);
  2706. gfx_v7_0_cp_gfx_enable(adev, true);
  2707. r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
  2708. if (r) {
  2709. DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
  2710. return r;
  2711. }
2712. /* init the CE partitions. The CE is only used for gfx on CIK */
  2713. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
  2714. amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
  2715. amdgpu_ring_write(ring, 0x8000);
  2716. amdgpu_ring_write(ring, 0x8000);
  2717. /* clear state buffer */
  2718. amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  2719. amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  2720. amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  2721. amdgpu_ring_write(ring, 0x80000000);
  2722. amdgpu_ring_write(ring, 0x80000000);
  2723. for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
  2724. for (ext = sect->section; ext->extent != NULL; ++ext) {
  2725. if (sect->id == SECT_CONTEXT) {
  2726. amdgpu_ring_write(ring,
  2727. PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
  2728. amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
  2729. for (i = 0; i < ext->reg_count; i++)
  2730. amdgpu_ring_write(ring, ext->extent[i]);
  2731. }
  2732. }
  2733. }
  2734. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  2735. amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
  2736. switch (adev->asic_type) {
  2737. case CHIP_BONAIRE:
  2738. amdgpu_ring_write(ring, 0x16000012);
  2739. amdgpu_ring_write(ring, 0x00000000);
  2740. break;
  2741. case CHIP_KAVERI:
  2742. amdgpu_ring_write(ring, 0x00000000); /* XXX */
  2743. amdgpu_ring_write(ring, 0x00000000);
  2744. break;
  2745. case CHIP_KABINI:
  2746. case CHIP_MULLINS:
  2747. amdgpu_ring_write(ring, 0x00000000); /* XXX */
  2748. amdgpu_ring_write(ring, 0x00000000);
  2749. break;
  2750. case CHIP_HAWAII:
  2751. amdgpu_ring_write(ring, 0x3a00161a);
  2752. amdgpu_ring_write(ring, 0x0000002e);
  2753. break;
  2754. default:
  2755. amdgpu_ring_write(ring, 0x00000000);
  2756. amdgpu_ring_write(ring, 0x00000000);
  2757. break;
  2758. }
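/* the two dwords written per ASIC above program PA_SC_RASTER_CONFIG (and,
 * presumably, PA_SC_RASTER_CONFIG_1) with per-chip raster configuration values.
 */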
  2759. amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  2760. amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  2761. amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
  2762. amdgpu_ring_write(ring, 0);
  2763. amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  2764. amdgpu_ring_write(ring, 0x00000316);
  2765. amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
  2766. amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
  2767. amdgpu_ring_unlock_commit(ring);
  2768. return 0;
  2769. }
  2770. /**
  2771. * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
  2772. *
  2773. * @adev: amdgpu_device pointer
  2774. *
  2775. * Program the location and size of the gfx ring buffer
  2776. * and test it to make sure it's working.
  2777. * Returns 0 for success, error for failure.
  2778. */
  2779. static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
  2780. {
  2781. struct amdgpu_ring *ring;
  2782. u32 tmp;
  2783. u32 rb_bufsz;
  2784. u64 rb_addr, rptr_addr;
  2785. int r;
  2786. WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
  2787. if (adev->asic_type != CHIP_HAWAII)
  2788. WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
  2789. /* Set the write pointer delay */
  2790. WREG32(mmCP_RB_WPTR_DELAY, 0);
  2791. /* set the RB to use vmid 0 */
  2792. WREG32(mmCP_RB_VMID, 0);
  2793. WREG32(mmSCRATCH_ADDR, 0);
  2794. /* ring 0 - compute and gfx */
  2795. /* Set ring buffer size */
  2796. ring = &adev->gfx.gfx_ring[0];
  2797. rb_bufsz = order_base_2(ring->ring_size / 8);
  2798. tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  2799. #ifdef __BIG_ENDIAN
  2800. tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
  2801. #endif
  2802. WREG32(mmCP_RB0_CNTL, tmp);
  2803. /* Initialize the ring buffer's read and write pointers */
  2804. WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
  2805. ring->wptr = 0;
  2806. WREG32(mmCP_RB0_WPTR, ring->wptr);
2807. /* set the wb address whether it's enabled or not */
  2808. rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
  2809. WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
  2810. WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
  2811. /* scratch register shadowing is no longer supported */
  2812. WREG32(mmSCRATCH_UMSK, 0);
  2813. mdelay(1);
  2814. WREG32(mmCP_RB0_CNTL, tmp);
  2815. rb_addr = ring->gpu_addr >> 8;
  2816. WREG32(mmCP_RB0_BASE, rb_addr);
  2817. WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
  2818. /* start the ring */
  2819. gfx_v7_0_cp_gfx_start(adev);
  2820. ring->ready = true;
  2821. r = amdgpu_ring_test_ring(ring);
  2822. if (r) {
  2823. ring->ready = false;
  2824. return r;
  2825. }
  2826. return 0;
  2827. }
  2828. static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
  2829. {
  2830. u32 rptr;
  2831. rptr = ring->adev->wb.wb[ring->rptr_offs];
  2832. return rptr;
  2833. }
  2834. static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
  2835. {
  2836. struct amdgpu_device *adev = ring->adev;
  2837. u32 wptr;
  2838. wptr = RREG32(mmCP_RB0_WPTR);
  2839. return wptr;
  2840. }
  2841. static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
  2842. {
  2843. struct amdgpu_device *adev = ring->adev;
  2844. WREG32(mmCP_RB0_WPTR, ring->wptr);
  2845. (void)RREG32(mmCP_RB0_WPTR);
  2846. }
  2847. static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
  2848. {
  2849. u32 rptr;
  2850. rptr = ring->adev->wb.wb[ring->rptr_offs];
  2851. return rptr;
  2852. }
  2853. static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
  2854. {
  2855. u32 wptr;
  2856. /* XXX check if swapping is necessary on BE */
  2857. wptr = ring->adev->wb.wb[ring->wptr_offs];
  2858. return wptr;
  2859. }
  2860. static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
  2861. {
  2862. struct amdgpu_device *adev = ring->adev;
  2863. /* XXX check if swapping is necessary on BE */
  2864. adev->wb.wb[ring->wptr_offs] = ring->wptr;
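/* compute queues are kicked via their doorbell rather than an MMIO ring register write */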
  2865. WDOORBELL32(ring->doorbell_index, ring->wptr);
  2866. }
  2867. /**
  2868. * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
  2869. *
  2870. * @adev: amdgpu_device pointer
  2871. * @enable: enable or disable the MEs
  2872. *
  2873. * Halts or unhalts the compute MEs.
  2874. */
  2875. static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
  2876. {
  2877. int i;
  2878. if (enable) {
  2879. WREG32(mmCP_MEC_CNTL, 0);
  2880. } else {
  2881. WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
  2882. for (i = 0; i < adev->gfx.num_compute_rings; i++)
  2883. adev->gfx.compute_ring[i].ready = false;
  2884. }
  2885. udelay(50);
  2886. }
  2887. /**
  2888. * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
  2889. *
  2890. * @adev: amdgpu_device pointer
  2891. *
2892. * Loads the compute MEC1 (and, on Kaveri, MEC2) ucode.
  2893. * Returns 0 for success, -EINVAL if the ucode is not available.
  2894. */
  2895. static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
  2896. {
  2897. const struct gfx_firmware_header_v1_0 *mec_hdr;
  2898. const __le32 *fw_data;
  2899. unsigned i, fw_size;
  2900. if (!adev->gfx.mec_fw)
  2901. return -EINVAL;
  2902. mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  2903. amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
  2904. adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
  2905. adev->gfx.mec_feature_version = le32_to_cpu(
  2906. mec_hdr->ucode_feature_version);
  2907. gfx_v7_0_cp_compute_enable(adev, false);
  2908. /* MEC1 */
  2909. fw_data = (const __le32 *)
  2910. (adev->gfx.mec_fw->data +
  2911. le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
  2912. fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
  2913. WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
  2914. for (i = 0; i < fw_size; i++)
  2915. WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
  2916. WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
  2917. if (adev->asic_type == CHIP_KAVERI) {
  2918. const struct gfx_firmware_header_v1_0 *mec2_hdr;
  2919. if (!adev->gfx.mec2_fw)
  2920. return -EINVAL;
  2921. mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
  2922. amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
  2923. adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
  2924. adev->gfx.mec2_feature_version = le32_to_cpu(
  2925. mec2_hdr->ucode_feature_version);
  2926. /* MEC2 */
  2927. fw_data = (const __le32 *)
  2928. (adev->gfx.mec2_fw->data +
  2929. le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
  2930. fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
  2931. WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
  2932. for (i = 0; i < fw_size; i++)
  2933. WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
  2934. WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
  2935. }
  2936. return 0;
  2937. }
  2938. /**
  2939. * gfx_v7_0_cp_compute_start - start the compute queues
  2940. *
  2941. * @adev: amdgpu_device pointer
  2942. *
  2943. * Enable the compute queues.
  2944. * Returns 0 for success, error for failure.
  2945. */
  2946. static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
  2947. {
  2948. gfx_v7_0_cp_compute_enable(adev, true);
  2949. return 0;
  2950. }
  2951. /**
  2952. * gfx_v7_0_cp_compute_fini - stop the compute queues
  2953. *
  2954. * @adev: amdgpu_device pointer
  2955. *
  2956. * Stop the compute queues and tear down the driver queue
  2957. * info.
  2958. */
  2959. static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
  2960. {
  2961. int i, r;
  2962. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  2963. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  2964. if (ring->mqd_obj) {
  2965. r = amdgpu_bo_reserve(ring->mqd_obj, false);
  2966. if (unlikely(r != 0))
  2967. dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
  2968. amdgpu_bo_unpin(ring->mqd_obj);
  2969. amdgpu_bo_unreserve(ring->mqd_obj);
  2970. amdgpu_bo_unref(&ring->mqd_obj);
  2971. ring->mqd_obj = NULL;
  2972. }
  2973. }
  2974. }
  2975. static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
  2976. {
  2977. int r;
  2978. if (adev->gfx.mec.hpd_eop_obj) {
  2979. r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
  2980. if (unlikely(r != 0))
  2981. dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
  2982. amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
  2983. amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
  2984. amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
  2985. adev->gfx.mec.hpd_eop_obj = NULL;
  2986. }
  2987. }
  2988. #define MEC_HPD_SIZE 2048
  2989. static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
  2990. {
  2991. int r;
  2992. u32 *hpd;
  2993. /*
  2994. * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
  2995. * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
  2996. * Nonetheless, we assign only 1 pipe because all other pipes will
  2997. * be handled by KFD
  2998. */
  2999. adev->gfx.mec.num_mec = 1;
  3000. adev->gfx.mec.num_pipe = 1;
  3001. adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
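/* with num_mec = num_pipe = 1 this leaves 8 queues for amdgpu; the remaining
 * pipes are handled by the KFD as noted above.
 */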
  3002. if (adev->gfx.mec.hpd_eop_obj == NULL) {
  3003. r = amdgpu_bo_create(adev,
3004. adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
  3005. PAGE_SIZE, true,
  3006. AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
  3007. &adev->gfx.mec.hpd_eop_obj);
  3008. if (r) {
3009. dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
  3010. return r;
  3011. }
  3012. }
  3013. r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
  3014. if (unlikely(r != 0)) {
  3015. gfx_v7_0_mec_fini(adev);
  3016. return r;
  3017. }
  3018. r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
  3019. &adev->gfx.mec.hpd_eop_gpu_addr);
  3020. if (r) {
3021. dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
  3022. gfx_v7_0_mec_fini(adev);
  3023. return r;
  3024. }
  3025. r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
  3026. if (r) {
3027. dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
  3028. gfx_v7_0_mec_fini(adev);
  3029. return r;
  3030. }
  3031. /* clear memory. Not sure if this is required or not */
3032. memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
  3033. amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
  3034. amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
  3035. return 0;
  3036. }
  3037. struct hqd_registers
  3038. {
  3039. u32 cp_mqd_base_addr;
  3040. u32 cp_mqd_base_addr_hi;
  3041. u32 cp_hqd_active;
  3042. u32 cp_hqd_vmid;
  3043. u32 cp_hqd_persistent_state;
  3044. u32 cp_hqd_pipe_priority;
  3045. u32 cp_hqd_queue_priority;
  3046. u32 cp_hqd_quantum;
  3047. u32 cp_hqd_pq_base;
  3048. u32 cp_hqd_pq_base_hi;
  3049. u32 cp_hqd_pq_rptr;
  3050. u32 cp_hqd_pq_rptr_report_addr;
  3051. u32 cp_hqd_pq_rptr_report_addr_hi;
  3052. u32 cp_hqd_pq_wptr_poll_addr;
  3053. u32 cp_hqd_pq_wptr_poll_addr_hi;
  3054. u32 cp_hqd_pq_doorbell_control;
  3055. u32 cp_hqd_pq_wptr;
  3056. u32 cp_hqd_pq_control;
  3057. u32 cp_hqd_ib_base_addr;
  3058. u32 cp_hqd_ib_base_addr_hi;
  3059. u32 cp_hqd_ib_rptr;
  3060. u32 cp_hqd_ib_control;
  3061. u32 cp_hqd_iq_timer;
  3062. u32 cp_hqd_iq_rptr;
  3063. u32 cp_hqd_dequeue_request;
  3064. u32 cp_hqd_dma_offload;
  3065. u32 cp_hqd_sema_cmd;
  3066. u32 cp_hqd_msg_type;
  3067. u32 cp_hqd_atomic0_preop_lo;
  3068. u32 cp_hqd_atomic0_preop_hi;
  3069. u32 cp_hqd_atomic1_preop_lo;
  3070. u32 cp_hqd_atomic1_preop_hi;
  3071. u32 cp_hqd_hq_scheduler0;
  3072. u32 cp_hqd_hq_scheduler1;
  3073. u32 cp_mqd_control;
  3074. };
  3075. struct bonaire_mqd
  3076. {
  3077. u32 header;
  3078. u32 dispatch_initiator;
  3079. u32 dimensions[3];
  3080. u32 start_idx[3];
  3081. u32 num_threads[3];
  3082. u32 pipeline_stat_enable;
  3083. u32 perf_counter_enable;
  3084. u32 pgm[2];
  3085. u32 tba[2];
  3086. u32 tma[2];
  3087. u32 pgm_rsrc[2];
  3088. u32 vmid;
  3089. u32 resource_limits;
  3090. u32 static_thread_mgmt01[2];
  3091. u32 tmp_ring_size;
  3092. u32 static_thread_mgmt23[2];
  3093. u32 restart[3];
  3094. u32 thread_trace_enable;
  3095. u32 reserved1;
  3096. u32 user_data[16];
  3097. u32 vgtcs_invoke_count[2];
  3098. struct hqd_registers queue_state;
  3099. u32 dequeue_cntr;
  3100. u32 interrupt_queue[64];
  3101. };
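/* The MQD (memory queue descriptor) above holds the persistent register image
 * for one compute queue; gfx_v7_0_cp_compute_resume() below fills one per ring
 * and points CP_MQD_BASE_ADDR at it.
 */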
  3102. /**
  3103. * gfx_v7_0_cp_compute_resume - setup the compute queue registers
  3104. *
  3105. * @adev: amdgpu_device pointer
  3106. *
  3107. * Program the compute queues and test them to make sure they
  3108. * are working.
  3109. * Returns 0 for success, error for failure.
  3110. */
  3111. static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
  3112. {
  3113. int r, i, j;
  3114. u32 tmp;
  3115. bool use_doorbell = true;
  3116. u64 hqd_gpu_addr;
  3117. u64 mqd_gpu_addr;
  3118. u64 eop_gpu_addr;
  3119. u64 wb_gpu_addr;
  3120. u32 *buf;
  3121. struct bonaire_mqd *mqd;
  3122. r = gfx_v7_0_cp_compute_start(adev);
  3123. if (r)
  3124. return r;
  3125. /* fix up chicken bits */
  3126. tmp = RREG32(mmCP_CPF_DEBUG);
  3127. tmp |= (1 << 23);
  3128. WREG32(mmCP_CPF_DEBUG, tmp);
  3129. /* init the pipes */
  3130. mutex_lock(&adev->srbm_mutex);
  3131. for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
  3132. int me = (i < 4) ? 1 : 2;
  3133. int pipe = (i < 4) ? i : (i - 4);
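/* pipes 0-3 map to MEC1 (me == 1), pipes 4-7 to MEC2 (me == 2) */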
  3134. eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
  3135. cik_srbm_select(adev, me, pipe, 0, 0);
  3136. /* write the EOP addr */
  3137. WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
  3138. WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
  3139. /* set the VMID assigned */
  3140. WREG32(mmCP_HPD_EOP_VMID, 0);
  3141. /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
  3142. tmp = RREG32(mmCP_HPD_EOP_CONTROL);
  3143. tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
  3144. tmp |= order_base_2(MEC_HPD_SIZE / 8);
  3145. WREG32(mmCP_HPD_EOP_CONTROL, tmp);
  3146. }
  3147. cik_srbm_select(adev, 0, 0, 0, 0);
  3148. mutex_unlock(&adev->srbm_mutex);
  3149. /* init the queues. Just two for now. */
  3150. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  3151. struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
  3152. if (ring->mqd_obj == NULL) {
  3153. r = amdgpu_bo_create(adev,
  3154. sizeof(struct bonaire_mqd),
  3155. PAGE_SIZE, true,
  3156. AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
  3157. &ring->mqd_obj);
  3158. if (r) {
  3159. dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
  3160. return r;
  3161. }
  3162. }
  3163. r = amdgpu_bo_reserve(ring->mqd_obj, false);
  3164. if (unlikely(r != 0)) {
  3165. gfx_v7_0_cp_compute_fini(adev);
  3166. return r;
  3167. }
  3168. r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
  3169. &mqd_gpu_addr);
  3170. if (r) {
  3171. dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
  3172. gfx_v7_0_cp_compute_fini(adev);
  3173. return r;
  3174. }
  3175. r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
  3176. if (r) {
  3177. dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
  3178. gfx_v7_0_cp_compute_fini(adev);
  3179. return r;
  3180. }
  3181. /* init the mqd struct */
  3182. memset(buf, 0, sizeof(struct bonaire_mqd));
  3183. mqd = (struct bonaire_mqd *)buf;
  3184. mqd->header = 0xC0310800;
  3185. mqd->static_thread_mgmt01[0] = 0xffffffff;
  3186. mqd->static_thread_mgmt01[1] = 0xffffffff;
  3187. mqd->static_thread_mgmt23[0] = 0xffffffff;
  3188. mqd->static_thread_mgmt23[1] = 0xffffffff;
  3189. mutex_lock(&adev->srbm_mutex);
  3190. cik_srbm_select(adev, ring->me,
  3191. ring->pipe,
  3192. ring->queue, 0);
  3193. /* disable wptr polling */
  3194. tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
  3195. tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
  3196. WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
  3197. /* enable doorbell? */
  3198. mqd->queue_state.cp_hqd_pq_doorbell_control =
  3199. RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
  3200. if (use_doorbell)
  3201. mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
  3202. else
  3203. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
  3204. WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
  3205. mqd->queue_state.cp_hqd_pq_doorbell_control);
  3206. /* disable the queue if it's active */
  3207. mqd->queue_state.cp_hqd_dequeue_request = 0;
  3208. mqd->queue_state.cp_hqd_pq_rptr = 0;
3209. mqd->queue_state.cp_hqd_pq_wptr = 0;
  3210. if (RREG32(mmCP_HQD_ACTIVE) & 1) {
  3211. WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
  3212. for (j = 0; j < adev->usec_timeout; j++) {
  3213. if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
  3214. break;
  3215. udelay(1);
  3216. }
  3217. WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
  3218. WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
  3219. WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  3220. }
  3221. /* set the pointer to the MQD */
  3222. mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
  3223. mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
  3224. WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
  3225. WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
  3226. /* set MQD vmid to 0 */
  3227. mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
  3228. mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
  3229. WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3230. /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
  3231. hqd_gpu_addr = ring->gpu_addr >> 8;
  3232. mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
  3233. mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
  3234. WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
  3235. WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
  3236. /* set up the HQD, this is similar to CP_RB0_CNTL */
  3237. mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
  3238. mqd->queue_state.cp_hqd_pq_control &=
  3239. ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
  3240. CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
  3241. mqd->queue_state.cp_hqd_pq_control |=
  3242. order_base_2(ring->ring_size / 8);
  3243. mqd->queue_state.cp_hqd_pq_control |=
  3244. (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
  3245. #ifdef __BIG_ENDIAN
  3246. mqd->queue_state.cp_hqd_pq_control |=
  3247. 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
  3248. #endif
  3249. mqd->queue_state.cp_hqd_pq_control &=
  3250. ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
  3251. CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
  3252. CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
  3253. mqd->queue_state.cp_hqd_pq_control |=
  3254. CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
  3255. CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
  3256. WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3257. /* only used if the EN bit in CP_PQ_WPTR_POLL_CNTL is set */
  3258. wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
  3259. mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
  3260. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
  3261. WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
  3262. WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
  3263. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3264. /* set the wb address whether it's enabled or not */
  3265. wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
  3266. mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
  3267. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
  3268. upper_32_bits(wb_gpu_addr) & 0xffff;
  3269. WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
  3270. mqd->queue_state.cp_hqd_pq_rptr_report_addr);
  3271. WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
  3272. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
  3273. /* enable the doorbell if requested */
  3274. if (use_doorbell) {
  3275. mqd->queue_state.cp_hqd_pq_doorbell_control =
  3276. RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
  3277. mqd->queue_state.cp_hqd_pq_doorbell_control &=
  3278. ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
  3279. mqd->queue_state.cp_hqd_pq_doorbell_control |=
  3280. (ring->doorbell_index <<
  3281. CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
  3282. mqd->queue_state.cp_hqd_pq_doorbell_control |=
  3283. CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
  3284. mqd->queue_state.cp_hqd_pq_doorbell_control &=
  3285. ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
  3286. CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
  3287. } else {
  3288. mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
  3289. }
  3290. WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
  3291. mqd->queue_state.cp_hqd_pq_doorbell_control);
  3292. /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
  3293. ring->wptr = 0;
  3294. mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
  3295. WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  3296. mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
  3297. /* set the vmid for the queue */
  3298. mqd->queue_state.cp_hqd_vmid = 0;
  3299. WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
  3300. /* activate the queue */
  3301. mqd->queue_state.cp_hqd_active = 1;
  3302. WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
  3303. cik_srbm_select(adev, 0, 0, 0, 0);
  3304. mutex_unlock(&adev->srbm_mutex);
  3305. amdgpu_bo_kunmap(ring->mqd_obj);
  3306. amdgpu_bo_unreserve(ring->mqd_obj);
  3307. ring->ready = true;
  3308. r = amdgpu_ring_test_ring(ring);
  3309. if (r)
  3310. ring->ready = false;
  3311. }
  3312. return 0;
  3313. }
  3314. static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
  3315. {
  3316. gfx_v7_0_cp_gfx_enable(adev, enable);
  3317. gfx_v7_0_cp_compute_enable(adev, enable);
  3318. }
  3319. static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
  3320. {
  3321. int r;
  3322. r = gfx_v7_0_cp_gfx_load_microcode(adev);
  3323. if (r)
  3324. return r;
  3325. r = gfx_v7_0_cp_compute_load_microcode(adev);
  3326. if (r)
  3327. return r;
  3328. return 0;
  3329. }
  3330. static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
  3331. bool enable)
  3332. {
  3333. u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
  3334. if (enable)
  3335. tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
  3336. CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
  3337. else
  3338. tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
  3339. CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
  3340. WREG32(mmCP_INT_CNTL_RING0, tmp);
  3341. }
  3342. static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
  3343. {
  3344. int r;
  3345. gfx_v7_0_enable_gui_idle_interrupt(adev, false);
  3346. r = gfx_v7_0_cp_load_microcode(adev);
  3347. if (r)
  3348. return r;
  3349. r = gfx_v7_0_cp_gfx_resume(adev);
  3350. if (r)
  3351. return r;
  3352. r = gfx_v7_0_cp_compute_resume(adev);
  3353. if (r)
  3354. return r;
  3355. gfx_v7_0_enable_gui_idle_interrupt(adev, true);
  3356. return 0;
  3357. }
  3358. /*
  3359. * vm
3360. * VMID 0 covers the physical GPU addresses used by the kernel.
  3361. * VMIDs 1-15 are used for userspace clients and are handled
  3362. * by the amdgpu vm/hsa code.
  3363. */
  3364. /**
  3365. * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  3366. *
3367. * @ring: amdgpu ring buffer object, @vm_id: VMID to flush, @pd_addr: page directory base address
  3368. *
  3369. * Update the page table base and flush the VM TLB
  3370. * using the CP (CIK).
  3371. */
  3372. static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
  3373. unsigned vm_id, uint64_t pd_addr)
  3374. {
  3375. int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
  3376. if (usepfp) {
3377. /* sync CE with ME to prevent the CE from fetching the CE IB before the context switch is done */
  3378. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3379. amdgpu_ring_write(ring, 0);
  3380. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3381. amdgpu_ring_write(ring, 0);
  3382. }
  3383. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3384. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  3385. WRITE_DATA_DST_SEL(0)));
  3386. if (vm_id < 8) {
  3387. amdgpu_ring_write(ring,
  3388. (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
  3389. } else {
  3390. amdgpu_ring_write(ring,
  3391. (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
  3392. }
  3393. amdgpu_ring_write(ring, 0);
  3394. amdgpu_ring_write(ring, pd_addr >> 12);
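/* the page table base registers take the directory address in 4 KiB units, hence the >> 12 */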
  3395. /* bits 0-15 are the VM contexts0-15 */
  3396. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3397. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  3398. WRITE_DATA_DST_SEL(0)));
  3399. amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
  3400. amdgpu_ring_write(ring, 0);
  3401. amdgpu_ring_write(ring, 1 << vm_id);
  3402. /* wait for the invalidate to complete */
  3403. amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  3404. amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
  3405. WAIT_REG_MEM_FUNCTION(0) | /* always */
  3406. WAIT_REG_MEM_ENGINE(0))); /* me */
  3407. amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
  3408. amdgpu_ring_write(ring, 0);
  3409. amdgpu_ring_write(ring, 0); /* ref */
  3410. amdgpu_ring_write(ring, 0); /* mask */
  3411. amdgpu_ring_write(ring, 0x20); /* poll interval */
  3412. /* compute doesn't have PFP */
  3413. if (usepfp) {
  3414. /* sync PFP to ME, otherwise we might get invalid PFP reads */
  3415. amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  3416. amdgpu_ring_write(ring, 0x0);
3417. /* sync CE with ME to prevent the CE from fetching the CE IB before the context switch is done */
  3418. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3419. amdgpu_ring_write(ring, 0);
  3420. amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3421. amdgpu_ring_write(ring, 0);
  3422. }
  3423. }
  3424. /*
  3425. * RLC
  3426. * The RLC is a multi-purpose microengine that handles a
  3427. * variety of functions.
  3428. */
  3429. static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
  3430. {
  3431. int r;
  3432. /* save restore block */
  3433. if (adev->gfx.rlc.save_restore_obj) {
  3434. r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
  3435. if (unlikely(r != 0))
  3436. dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
  3437. amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
  3438. amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
  3439. amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
  3440. adev->gfx.rlc.save_restore_obj = NULL;
  3441. }
  3442. /* clear state block */
  3443. if (adev->gfx.rlc.clear_state_obj) {
  3444. r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
  3445. if (unlikely(r != 0))
  3446. dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
  3447. amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
  3448. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  3449. amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
  3450. adev->gfx.rlc.clear_state_obj = NULL;
  3451. }
3452. /* cp table block */
  3453. if (adev->gfx.rlc.cp_table_obj) {
  3454. r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
  3455. if (unlikely(r != 0))
  3456. dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
  3457. amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
  3458. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  3459. amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
  3460. adev->gfx.rlc.cp_table_obj = NULL;
  3461. }
  3462. }
  3463. static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
  3464. {
  3465. const u32 *src_ptr;
  3466. volatile u32 *dst_ptr;
  3467. u32 dws, i;
  3468. const struct cs_section_def *cs_data;
  3469. int r;
  3470. /* allocate rlc buffers */
  3471. if (adev->flags & AMD_IS_APU) {
  3472. if (adev->asic_type == CHIP_KAVERI) {
  3473. adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
  3474. adev->gfx.rlc.reg_list_size =
  3475. (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
  3476. } else {
  3477. adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
  3478. adev->gfx.rlc.reg_list_size =
  3479. (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
  3480. }
  3481. }
  3482. adev->gfx.rlc.cs_data = ci_cs_data;
  3483. adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
  3484. src_ptr = adev->gfx.rlc.reg_list;
  3485. dws = adev->gfx.rlc.reg_list_size;
  3486. dws += (5 * 16) + 48 + 48 + 64;
  3487. cs_data = adev->gfx.rlc.cs_data;
  3488. if (src_ptr) {
  3489. /* save restore block */
  3490. if (adev->gfx.rlc.save_restore_obj == NULL) {
  3491. r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
  3492. AMDGPU_GEM_DOMAIN_VRAM,
  3493. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
  3494. NULL, NULL,
  3495. &adev->gfx.rlc.save_restore_obj);
  3496. if (r) {
  3497. dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
  3498. return r;
  3499. }
  3500. }
  3501. r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
  3502. if (unlikely(r != 0)) {
  3503. gfx_v7_0_rlc_fini(adev);
  3504. return r;
  3505. }
  3506. r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
  3507. &adev->gfx.rlc.save_restore_gpu_addr);
  3508. if (r) {
  3509. amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
  3510. dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
  3511. gfx_v7_0_rlc_fini(adev);
  3512. return r;
  3513. }
  3514. r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
  3515. if (r) {
  3516. dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
  3517. gfx_v7_0_rlc_fini(adev);
  3518. return r;
  3519. }
  3520. /* write the sr buffer */
  3521. dst_ptr = adev->gfx.rlc.sr_ptr;
  3522. for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
  3523. dst_ptr[i] = cpu_to_le32(src_ptr[i]);
  3524. amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
  3525. amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
  3526. }
  3527. if (cs_data) {
  3528. /* clear state block */
  3529. adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
  3530. if (adev->gfx.rlc.clear_state_obj == NULL) {
  3531. r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
  3532. AMDGPU_GEM_DOMAIN_VRAM,
  3533. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
  3534. NULL, NULL,
  3535. &adev->gfx.rlc.clear_state_obj);
  3536. if (r) {
  3537. dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
  3538. gfx_v7_0_rlc_fini(adev);
  3539. return r;
  3540. }
  3541. }
  3542. r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
  3543. if (unlikely(r != 0)) {
  3544. gfx_v7_0_rlc_fini(adev);
  3545. return r;
  3546. }
  3547. r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
  3548. &adev->gfx.rlc.clear_state_gpu_addr);
  3549. if (r) {
  3550. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  3551. dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
  3552. gfx_v7_0_rlc_fini(adev);
  3553. return r;
  3554. }
  3555. r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
  3556. if (r) {
  3557. dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
  3558. gfx_v7_0_rlc_fini(adev);
  3559. return r;
  3560. }
  3561. /* set up the cs buffer */
  3562. dst_ptr = adev->gfx.rlc.cs_ptr;
  3563. gfx_v7_0_get_csb_buffer(adev, dst_ptr);
  3564. amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
  3565. amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
  3566. }
  3567. if (adev->gfx.rlc.cp_table_size) {
  3568. if (adev->gfx.rlc.cp_table_obj == NULL) {
  3569. r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
  3570. AMDGPU_GEM_DOMAIN_VRAM,
  3571. AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
  3572. NULL, NULL,
  3573. &adev->gfx.rlc.cp_table_obj);
  3574. if (r) {
  3575. dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
  3576. gfx_v7_0_rlc_fini(adev);
  3577. return r;
  3578. }
  3579. }
  3580. r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
  3581. if (unlikely(r != 0)) {
  3582. dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
  3583. gfx_v7_0_rlc_fini(adev);
  3584. return r;
  3585. }
  3586. r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
  3587. &adev->gfx.rlc.cp_table_gpu_addr);
  3588. if (r) {
  3589. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  3590. dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
  3591. gfx_v7_0_rlc_fini(adev);
  3592. return r;
  3593. }
  3594. r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
  3595. if (r) {
  3596. dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
  3597. gfx_v7_0_rlc_fini(adev);
  3598. return r;
  3599. }
  3600. gfx_v7_0_init_cp_pg_table(adev);
  3601. amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
  3602. amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
  3603. }
  3604. return 0;
  3605. }
  3606. static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
  3607. {
  3608. u32 tmp;
  3609. tmp = RREG32(mmRLC_LB_CNTL);
  3610. if (enable)
  3611. tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
  3612. else
  3613. tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
  3614. WREG32(mmRLC_LB_CNTL, tmp);
  3615. }
  3616. static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
  3617. {
  3618. u32 i, j, k;
  3619. u32 mask;
  3620. mutex_lock(&adev->grbm_idx_mutex);
  3621. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  3622. for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
  3623. gfx_v7_0_select_se_sh(adev, i, j);
  3624. for (k = 0; k < adev->usec_timeout; k++) {
  3625. if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
  3626. break;
  3627. udelay(1);
  3628. }
  3629. }
  3630. }
  3631. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  3632. mutex_unlock(&adev->grbm_idx_mutex);
  3633. mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
  3634. RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
  3635. RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
  3636. RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
  3637. for (k = 0; k < adev->usec_timeout; k++) {
  3638. if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
  3639. break;
  3640. udelay(1);
  3641. }
  3642. }
  3643. static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
  3644. {
  3645. u32 tmp;
  3646. tmp = RREG32(mmRLC_CNTL);
  3647. if (tmp != rlc)
  3648. WREG32(mmRLC_CNTL, rlc);
  3649. }
  3650. static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
  3651. {
  3652. u32 data, orig;
  3653. orig = data = RREG32(mmRLC_CNTL);
  3654. if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
  3655. u32 i;
  3656. data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
  3657. WREG32(mmRLC_CNTL, data);
  3658. for (i = 0; i < adev->usec_timeout; i++) {
  3659. if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
  3660. break;
  3661. udelay(1);
  3662. }
  3663. gfx_v7_0_wait_for_rlc_serdes(adev);
  3664. }
  3665. return orig;
  3666. }
  3667. void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
  3668. {
  3669. u32 tmp, i, mask;
  3670. tmp = 0x1 | (1 << 1);
  3671. WREG32(mmRLC_GPR_REG2, tmp);
  3672. mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
  3673. RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
  3674. for (i = 0; i < adev->usec_timeout; i++) {
  3675. if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
  3676. break;
  3677. udelay(1);
  3678. }
  3679. for (i = 0; i < adev->usec_timeout; i++) {
  3680. if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
  3681. break;
  3682. udelay(1);
  3683. }
  3684. }
  3685. void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
  3686. {
  3687. u32 tmp;
  3688. tmp = 0x1 | (0 << 1);
  3689. WREG32(mmRLC_GPR_REG2, tmp);
  3690. }
  3691. /**
  3692. * gfx_v7_0_rlc_stop - stop the RLC ME
  3693. *
  3694. * @adev: amdgpu_device pointer
  3695. *
  3696. * Halt the RLC ME (MicroEngine) (CIK).
  3697. */
  3698. void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
  3699. {
  3700. WREG32(mmRLC_CNTL, 0);
  3701. gfx_v7_0_enable_gui_idle_interrupt(adev, false);
  3702. gfx_v7_0_wait_for_rlc_serdes(adev);
  3703. }
  3704. /**
  3705. * gfx_v7_0_rlc_start - start the RLC ME
  3706. *
  3707. * @adev: amdgpu_device pointer
  3708. *
  3709. * Unhalt the RLC ME (MicroEngine) (CIK).
  3710. */
  3711. static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
  3712. {
  3713. WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
  3714. gfx_v7_0_enable_gui_idle_interrupt(adev, true);
  3715. udelay(50);
  3716. }
  3717. static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
  3718. {
  3719. u32 tmp = RREG32(mmGRBM_SOFT_RESET);
  3720. tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
  3721. WREG32(mmGRBM_SOFT_RESET, tmp);
  3722. udelay(50);
  3723. tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
  3724. WREG32(mmGRBM_SOFT_RESET, tmp);
  3725. udelay(50);
  3726. }
  3727. /**
  3728. * gfx_v7_0_rlc_resume - setup the RLC hw
  3729. *
  3730. * @adev: amdgpu_device pointer
  3731. *
  3732. * Initialize the RLC registers, load the ucode,
  3733. * and start the RLC (CIK).
  3734. * Returns 0 for success, -EINVAL if the ucode is not available.
  3735. */
  3736. static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
  3737. {
  3738. const struct rlc_firmware_header_v1_0 *hdr;
  3739. const __le32 *fw_data;
  3740. unsigned i, fw_size;
  3741. u32 tmp;
  3742. if (!adev->gfx.rlc_fw)
  3743. return -EINVAL;
  3744. hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
  3745. amdgpu_ucode_print_rlc_hdr(&hdr->header);
  3746. adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
  3747. adev->gfx.rlc_feature_version = le32_to_cpu(
  3748. hdr->ucode_feature_version);
  3749. gfx_v7_0_rlc_stop(adev);
  3750. /* disable CG */
  3751. tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
  3752. WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
  3753. gfx_v7_0_rlc_reset(adev);
  3754. gfx_v7_0_init_pg(adev);
  3755. WREG32(mmRLC_LB_CNTR_INIT, 0);
  3756. WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
  3757. mutex_lock(&adev->grbm_idx_mutex);
  3758. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  3759. WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
  3760. WREG32(mmRLC_LB_PARAMS, 0x00600408);
  3761. WREG32(mmRLC_LB_CNTL, 0x80000004);
  3762. mutex_unlock(&adev->grbm_idx_mutex);
  3763. WREG32(mmRLC_MC_CNTL, 0);
  3764. WREG32(mmRLC_UCODE_CNTL, 0);
  3765. fw_data = (const __le32 *)
  3766. (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3767. fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
  3768. WREG32(mmRLC_GPM_UCODE_ADDR, 0);
  3769. for (i = 0; i < fw_size; i++)
  3770. WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
  3771. WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
  3772. /* XXX - find out what chips support lbpw */
  3773. gfx_v7_0_enable_lbpw(adev, false);
  3774. if (adev->asic_type == CHIP_BONAIRE)
  3775. WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
  3776. gfx_v7_0_rlc_start(adev);
  3777. return 0;
  3778. }
  3779. static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
  3780. {
  3781. u32 data, orig, tmp, tmp2;
  3782. orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
  3783. if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
  3784. gfx_v7_0_enable_gui_idle_interrupt(adev, true);
  3785. tmp = gfx_v7_0_halt_rlc(adev);
  3786. mutex_lock(&adev->grbm_idx_mutex);
  3787. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  3788. WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  3789. WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  3790. tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
  3791. RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
  3792. RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
  3793. WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
  3794. mutex_unlock(&adev->grbm_idx_mutex);
  3795. gfx_v7_0_update_rlc(adev, tmp);
  3796. data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
  3797. } else {
  3798. gfx_v7_0_enable_gui_idle_interrupt(adev, false);
  3799. RREG32(mmCB_CGTT_SCLK_CTRL);
  3800. RREG32(mmCB_CGTT_SCLK_CTRL);
  3801. RREG32(mmCB_CGTT_SCLK_CTRL);
  3802. RREG32(mmCB_CGTT_SCLK_CTRL);
  3803. data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
  3804. }
  3805. if (orig != data)
  3806. WREG32(mmRLC_CGCG_CGLS_CTRL, data);
  3807. }
  3808. static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
  3809. {
  3810. u32 data, orig, tmp = 0;
  3811. if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
  3812. if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
  3813. if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
  3814. orig = data = RREG32(mmCP_MEM_SLP_CNTL);
  3815. data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
  3816. if (orig != data)
  3817. WREG32(mmCP_MEM_SLP_CNTL, data);
  3818. }
  3819. }
  3820. orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  3821. data |= 0x00000001;
  3822. data &= 0xfffffffd;
  3823. if (orig != data)
  3824. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
  3825. tmp = gfx_v7_0_halt_rlc(adev);
  3826. mutex_lock(&adev->grbm_idx_mutex);
  3827. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  3828. WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  3829. WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  3830. data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
  3831. RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
  3832. WREG32(mmRLC_SERDES_WR_CTRL, data);
  3833. mutex_unlock(&adev->grbm_idx_mutex);
  3834. gfx_v7_0_update_rlc(adev, tmp);
  3835. if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
  3836. orig = data = RREG32(mmCGTS_SM_CTRL_REG);
  3837. data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
  3838. data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
  3839. data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
  3840. data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
  3841. if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
  3842. (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
  3843. data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
  3844. data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
  3845. data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
  3846. data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
  3847. if (orig != data)
  3848. WREG32(mmCGTS_SM_CTRL_REG, data);
  3849. }
  3850. } else {
  3851. orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
  3852. data |= 0x00000003;
  3853. if (orig != data)
  3854. WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
  3855. data = RREG32(mmRLC_MEM_SLP_CNTL);
  3856. if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
  3857. data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
  3858. WREG32(mmRLC_MEM_SLP_CNTL, data);
  3859. }
  3860. data = RREG32(mmCP_MEM_SLP_CNTL);
  3861. if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
  3862. data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
  3863. WREG32(mmCP_MEM_SLP_CNTL, data);
  3864. }
  3865. orig = data = RREG32(mmCGTS_SM_CTRL_REG);
  3866. data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
  3867. if (orig != data)
  3868. WREG32(mmCGTS_SM_CTRL_REG, data);
  3869. tmp = gfx_v7_0_halt_rlc(adev);
  3870. mutex_lock(&adev->grbm_idx_mutex);
  3871. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  3872. WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  3873. WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  3874. data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
  3875. WREG32(mmRLC_SERDES_WR_CTRL, data);
  3876. mutex_unlock(&adev->grbm_idx_mutex);
  3877. gfx_v7_0_update_rlc(adev, tmp);
  3878. }
  3879. }
  3880. static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
  3881. bool enable)
  3882. {
  3883. gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3884. /* order matters: enable MGCG before CGCG, and disable CGCG before MGCG */
  3885. if (enable) {
  3886. gfx_v7_0_enable_mgcg(adev, true);
  3887. gfx_v7_0_enable_cgcg(adev, true);
  3888. } else {
  3889. gfx_v7_0_enable_cgcg(adev, false);
  3890. gfx_v7_0_enable_mgcg(adev, false);
  3891. }
  3892. gfx_v7_0_enable_gui_idle_interrupt(adev, true);
  3893. }
  3894. static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
  3895. bool enable)
  3896. {
  3897. u32 data, orig;
  3898. orig = data = RREG32(mmRLC_PG_CNTL);
  3899. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
  3900. data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
  3901. else
  3902. data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
  3903. if (orig != data)
  3904. WREG32(mmRLC_PG_CNTL, data);
  3905. }
  3906. static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
  3907. bool enable)
  3908. {
  3909. u32 data, orig;
  3910. orig = data = RREG32(mmRLC_PG_CNTL);
  3911. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
  3912. data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
  3913. else
  3914. data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
  3915. if (orig != data)
  3916. WREG32(mmRLC_PG_CNTL, data);
  3917. }
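/* note: the raw 0x8000 and 0x2000 masks used in the next two helpers appear to
 * correspond to the CP and GDS power-gating disable bits of RLC_PG_CNTL; no
 * named defines are used for them here.
 */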
  3918. static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
  3919. {
  3920. u32 data, orig;
  3921. orig = data = RREG32(mmRLC_PG_CNTL);
  3922. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
  3923. data &= ~0x8000;
  3924. else
  3925. data |= 0x8000;
  3926. if (orig != data)
  3927. WREG32(mmRLC_PG_CNTL, data);
  3928. }
  3929. static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
  3930. {
  3931. u32 data, orig;
  3932. orig = data = RREG32(mmRLC_PG_CNTL);
  3933. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
  3934. data &= ~0x2000;
  3935. else
  3936. data |= 0x2000;
  3937. if (orig != data)
  3938. WREG32(mmRLC_PG_CNTL, data);
  3939. }
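/* gfx_v7_0_init_cp_pg_table below copies each CP engine's ucode jump table
 * (jt_offset/jt_size from the firmware header) into the RLC cp_table buffer,
 * in the order CE, PFP, ME, MEC1 and, on Kaveri, MEC2.
 */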
  3940. static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
  3941. {
  3942. const __le32 *fw_data;
  3943. volatile u32 *dst_ptr;
  3944. int me, i, max_me = 4;
  3945. u32 bo_offset = 0;
  3946. u32 table_offset, table_size;
  3947. if (adev->asic_type == CHIP_KAVERI)
  3948. max_me = 5;
  3949. if (adev->gfx.rlc.cp_table_ptr == NULL)
  3950. return;
  3951. /* write the cp table buffer */
  3952. dst_ptr = adev->gfx.rlc.cp_table_ptr;
  3953. for (me = 0; me < max_me; me++) {
  3954. if (me == 0) {
  3955. const struct gfx_firmware_header_v1_0 *hdr =
  3956. (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
  3957. fw_data = (const __le32 *)
  3958. (adev->gfx.ce_fw->data +
  3959. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3960. table_offset = le32_to_cpu(hdr->jt_offset);
  3961. table_size = le32_to_cpu(hdr->jt_size);
  3962. } else if (me == 1) {
  3963. const struct gfx_firmware_header_v1_0 *hdr =
  3964. (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
  3965. fw_data = (const __le32 *)
  3966. (adev->gfx.pfp_fw->data +
  3967. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3968. table_offset = le32_to_cpu(hdr->jt_offset);
  3969. table_size = le32_to_cpu(hdr->jt_size);
  3970. } else if (me == 2) {
  3971. const struct gfx_firmware_header_v1_0 *hdr =
  3972. (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
  3973. fw_data = (const __le32 *)
  3974. (adev->gfx.me_fw->data +
  3975. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3976. table_offset = le32_to_cpu(hdr->jt_offset);
  3977. table_size = le32_to_cpu(hdr->jt_size);
  3978. } else if (me == 3) {
  3979. const struct gfx_firmware_header_v1_0 *hdr =
  3980. (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
  3981. fw_data = (const __le32 *)
  3982. (adev->gfx.mec_fw->data +
  3983. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3984. table_offset = le32_to_cpu(hdr->jt_offset);
  3985. table_size = le32_to_cpu(hdr->jt_size);
  3986. } else {
  3987. const struct gfx_firmware_header_v1_0 *hdr =
  3988. (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
  3989. fw_data = (const __le32 *)
  3990. (adev->gfx.mec2_fw->data +
  3991. le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  3992. table_offset = le32_to_cpu(hdr->jt_offset);
  3993. table_size = le32_to_cpu(hdr->jt_size);
  3994. }
  3995. for (i = 0; i < table_size; i ++) {
  3996. dst_ptr[bo_offset + i] =
  3997. cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
  3998. }
  3999. bo_offset += table_size;
  4000. }
  4001. }
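/*
 * gfx_v7_0_enable_gfx_cgpg - enable/disable graphics power gating
 *
 * Sets or clears GFX_POWER_GATING_ENABLE in RLC_PG_CNTL together with
 * AUTO_PG_EN in RLC_AUTO_PG_CTRL; the trailing DB_RENDER_CONTROL read on
 * the disable path is kept from the original code (presumably a posting
 * read).
 */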
  4002. static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
  4003. bool enable)
  4004. {
  4005. u32 data, orig;
  4006. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
  4007. orig = data = RREG32(mmRLC_PG_CNTL);
  4008. data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
  4009. if (orig != data)
  4010. WREG32(mmRLC_PG_CNTL, data);
  4011. orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
  4012. data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
  4013. if (orig != data)
  4014. WREG32(mmRLC_AUTO_PG_CTRL, data);
  4015. } else {
  4016. orig = data = RREG32(mmRLC_PG_CNTL);
  4017. data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
  4018. if (orig != data)
  4019. WREG32(mmRLC_PG_CNTL, data);
  4020. orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
  4021. data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
  4022. if (orig != data)
  4023. WREG32(mmRLC_AUTO_PG_CTRL, data);
  4024. data = RREG32(mmDB_RENDER_CONTROL);
  4025. }
  4026. }
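/*
 * gfx_v7_0_get_cu_active_bitmap - active CU bitmap for one SE/SH
 *
 * Merges the inactive-CU bits from the upper halves of
 * CC_GC_SHADER_ARRAY_CONFIG and GC_USER_SHADER_ARRAY_CONFIG, then inverts
 * and masks to max_cu_per_sh so that a set bit means an active CU
 * (e.g. with max_cu_per_sh = 8 the mask built below is 0xff).
 */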
  4027. static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev,
  4028. u32 se, u32 sh)
  4029. {
  4030. u32 mask = 0, tmp, tmp1;
  4031. int i;
  4032. gfx_v7_0_select_se_sh(adev, se, sh);
  4033. tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
  4034. tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
  4035. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  4036. tmp &= 0xffff0000;
  4037. tmp |= tmp1;
  4038. tmp >>= 16;
  4039. for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
  4040. mask <<= 1;
  4041. mask |= 1;
  4042. }
  4043. return (~tmp) & mask;
  4044. }
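/*
 * gfx_v7_0_init_ao_cu_mask - program the RLC always-on CU mask
 *
 * Writes the always-on CU mask to RLC_PG_ALWAYS_ON_CU_MASK and the total
 * active CU count into the MAX_POWERED_UP_CU field of RLC_MAX_PG_CU.
 */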
  4045. static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
  4046. {
  4047. uint32_t tmp, active_cu_number;
  4048. struct amdgpu_cu_info cu_info;
  4049. gfx_v7_0_get_cu_info(adev, &cu_info);
  4050. tmp = cu_info.ao_cu_mask;
  4051. active_cu_number = cu_info.number;
  4052. WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
  4053. tmp = RREG32(mmRLC_MAX_PG_CU);
  4054. tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
  4055. tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
  4056. WREG32(mmRLC_MAX_PG_CU, tmp);
  4057. }
  4058. static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
  4059. bool enable)
  4060. {
  4061. u32 data, orig;
  4062. orig = data = RREG32(mmRLC_PG_CNTL);
  4063. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
  4064. data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
  4065. else
  4066. data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
  4067. if (orig != data)
  4068. WREG32(mmRLC_PG_CNTL, data);
  4069. }
  4070. static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
  4071. bool enable)
  4072. {
  4073. u32 data, orig;
  4074. orig = data = RREG32(mmRLC_PG_CNTL);
  4075. if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
  4076. data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
  4077. else
  4078. data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
  4079. if (orig != data)
  4080. WREG32(mmRLC_PG_CNTL, data);
  4081. }
  4082. #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
  4083. #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
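/*
 * gfx_v7_0_init_gfx_cgpg - set up the RLC for graphics power gating
 *
 * Loads the clear-state descriptor (address and size) and the save/restore
 * register list into RLC GPM scratch, points the RLC at the save/restore
 * buffer and the CP jump table, and programs the idle poll count, PG delays
 * and the GFX idle threshold used for automatic power gating.
 */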
  4084. static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
  4085. {
  4086. u32 data, orig;
  4087. u32 i;
  4088. if (adev->gfx.rlc.cs_data) {
  4089. WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  4090. WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
  4091. WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
  4092. WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
  4093. } else {
  4094. WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  4095. for (i = 0; i < 3; i++)
  4096. WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
  4097. }
  4098. if (adev->gfx.rlc.reg_list) {
  4099. WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
  4100. for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
  4101. WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
  4102. }
  4103. orig = data = RREG32(mmRLC_PG_CNTL);
  4104. data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
  4105. if (orig != data)
  4106. WREG32(mmRLC_PG_CNTL, data);
  4107. WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
  4108. WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
  4109. data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
  4110. data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
  4111. data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
  4112. WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
  4113. data = 0x10101010;
  4114. WREG32(mmRLC_PG_DELAY, data);
  4115. data = RREG32(mmRLC_PG_DELAY_2);
  4116. data &= ~0xff;
  4117. data |= 0x3;
  4118. WREG32(mmRLC_PG_DELAY_2, data);
  4119. data = RREG32(mmRLC_AUTO_PG_CTRL);
  4120. data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
  4121. data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
  4122. WREG32(mmRLC_AUTO_PG_CTRL, data);
  4123. }
  4124. static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
  4125. {
  4126. gfx_v7_0_enable_gfx_cgpg(adev, enable);
  4127. gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
  4128. gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
  4129. }
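/*
 * gfx_v7_0_get_csb_size - size of the clear-state buffer in dwords
 *
 * Mirrors the packets emitted by gfx_v7_0_get_csb_buffer(): preamble
 * begin/end, context control, one SET_CONTEXT_REG packet per SECT_CONTEXT
 * extent, the PA_SC_RASTER_CONFIG pair and the final CLEAR_STATE.
 */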
  4130. static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
  4131. {
  4132. u32 count = 0;
  4133. const struct cs_section_def *sect = NULL;
  4134. const struct cs_extent_def *ext = NULL;
  4135. if (adev->gfx.rlc.cs_data == NULL)
  4136. return 0;
  4137. /* begin clear state */
  4138. count += 2;
  4139. /* context control state */
  4140. count += 3;
  4141. for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
  4142. for (ext = sect->section; ext->extent != NULL; ++ext) {
  4143. if (sect->id == SECT_CONTEXT)
  4144. count += 2 + ext->reg_count;
  4145. else
  4146. return 0;
  4147. }
  4148. }
  4149. /* pa_sc_raster_config/pa_sc_raster_config1 */
  4150. count += 4;
  4151. /* end clear state */
  4152. count += 2;
  4153. /* clear state */
  4154. count += 2;
  4155. return count;
  4156. }
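/*
 * gfx_v7_0_get_csb_buffer - build the clear-state indirect buffer
 *
 * Emits the PM4 stream counted above into @buffer, with per-ASIC
 * PA_SC_RASTER_CONFIG values (only Bonaire and Hawaii program non-zero
 * raster configs here; the APUs use zero placeholders).
 */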
  4157. static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
  4158. volatile u32 *buffer)
  4159. {
  4160. u32 count = 0, i;
  4161. const struct cs_section_def *sect = NULL;
  4162. const struct cs_extent_def *ext = NULL;
  4163. if (adev->gfx.rlc.cs_data == NULL)
  4164. return;
  4165. if (buffer == NULL)
  4166. return;
  4167. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  4168. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  4169. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  4170. buffer[count++] = cpu_to_le32(0x80000000);
  4171. buffer[count++] = cpu_to_le32(0x80000000);
  4172. for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
  4173. for (ext = sect->section; ext->extent != NULL; ++ext) {
  4174. if (sect->id == SECT_CONTEXT) {
  4175. buffer[count++] =
  4176. cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
  4177. buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
  4178. for (i = 0; i < ext->reg_count; i++)
  4179. buffer[count++] = cpu_to_le32(ext->extent[i]);
  4180. } else {
  4181. return;
  4182. }
  4183. }
  4184. }
  4185. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  4186. buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
  4187. switch (adev->asic_type) {
  4188. case CHIP_BONAIRE:
  4189. buffer[count++] = cpu_to_le32(0x16000012);
  4190. buffer[count++] = cpu_to_le32(0x00000000);
  4191. break;
  4192. case CHIP_KAVERI:
  4193. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  4194. buffer[count++] = cpu_to_le32(0x00000000);
  4195. break;
  4196. case CHIP_KABINI:
  4197. case CHIP_MULLINS:
  4198. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  4199. buffer[count++] = cpu_to_le32(0x00000000);
  4200. break;
  4201. case CHIP_HAWAII:
  4202. buffer[count++] = cpu_to_le32(0x3a00161a);
  4203. buffer[count++] = cpu_to_le32(0x0000002e);
  4204. break;
  4205. default:
  4206. buffer[count++] = cpu_to_le32(0x00000000);
  4207. buffer[count++] = cpu_to_le32(0x00000000);
  4208. break;
  4209. }
  4210. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  4211. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
  4212. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
  4213. buffer[count++] = cpu_to_le32(0);
  4214. }
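/*
 * gfx_v7_0_init_pg/gfx_v7_0_fini_pg - bring powergating up or down
 *
 * When any PG feature is flagged, init enables sclk slowdown on power
 * up/down, optionally sets up the CGPG state plus CP/GDS powergating,
 * then programs the always-on CU mask and enables the gfx PG paths;
 * fini reverses the gfx, CP and GDS powergating enables.
 */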
  4215. static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
  4216. {
  4217. if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
  4218. AMDGPU_PG_SUPPORT_GFX_SMG |
  4219. AMDGPU_PG_SUPPORT_GFX_DMG |
  4220. AMDGPU_PG_SUPPORT_CP |
  4221. AMDGPU_PG_SUPPORT_GDS |
  4222. AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
  4223. gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
  4224. gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
  4225. if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
  4226. gfx_v7_0_init_gfx_cgpg(adev);
  4227. gfx_v7_0_enable_cp_pg(adev, true);
  4228. gfx_v7_0_enable_gds_pg(adev, true);
  4229. }
  4230. gfx_v7_0_init_ao_cu_mask(adev);
  4231. gfx_v7_0_update_gfx_pg(adev, true);
  4232. }
  4233. }
  4234. static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
  4235. {
  4236. if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
  4237. AMDGPU_PG_SUPPORT_GFX_SMG |
  4238. AMDGPU_PG_SUPPORT_GFX_DMG |
  4239. AMDGPU_PG_SUPPORT_CP |
  4240. AMDGPU_PG_SUPPORT_GDS |
  4241. AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
  4242. gfx_v7_0_update_gfx_pg(adev, false);
  4243. if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
  4244. gfx_v7_0_enable_cp_pg(adev, false);
  4245. gfx_v7_0_enable_gds_pg(adev, false);
  4246. }
  4247. }
  4248. }
  4249. /**
  4250. * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
  4251. *
  4252. * @adev: amdgpu_device pointer
  4253. *
4254. * Fetches a GPU clock counter snapshot (CIK).
4255. * Returns the 64 bit clock counter snapshot.
  4256. */
  4257. uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
  4258. {
  4259. uint64_t clock;
  4260. mutex_lock(&adev->gfx.gpu_clock_mutex);
  4261. WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
  4262. clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
  4263. ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
  4264. mutex_unlock(&adev->gfx.gpu_clock_mutex);
  4265. return clock;
  4266. }
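/*
 * gfx_v7_0_ring_emit_gds_switch - emit a GDS switch on the ring
 *
 * Converts the byte-based GDS/GWS/OA allocations into hardware units and
 * writes the per-VMID base/size registers using WRITE_DATA packets.
 */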
  4267. static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
  4268. uint32_t vmid,
  4269. uint32_t gds_base, uint32_t gds_size,
  4270. uint32_t gws_base, uint32_t gws_size,
  4271. uint32_t oa_base, uint32_t oa_size)
  4272. {
  4273. gds_base = gds_base >> AMDGPU_GDS_SHIFT;
  4274. gds_size = gds_size >> AMDGPU_GDS_SHIFT;
  4275. gws_base = gws_base >> AMDGPU_GWS_SHIFT;
  4276. gws_size = gws_size >> AMDGPU_GWS_SHIFT;
  4277. oa_base = oa_base >> AMDGPU_OA_SHIFT;
  4278. oa_size = oa_size >> AMDGPU_OA_SHIFT;
  4279. /* GDS Base */
  4280. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4281. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4282. WRITE_DATA_DST_SEL(0)));
  4283. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
  4284. amdgpu_ring_write(ring, 0);
  4285. amdgpu_ring_write(ring, gds_base);
  4286. /* GDS Size */
  4287. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4288. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4289. WRITE_DATA_DST_SEL(0)));
  4290. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
  4291. amdgpu_ring_write(ring, 0);
  4292. amdgpu_ring_write(ring, gds_size);
  4293. /* GWS */
  4294. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4295. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4296. WRITE_DATA_DST_SEL(0)));
  4297. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
  4298. amdgpu_ring_write(ring, 0);
  4299. amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
  4300. /* OA */
  4301. amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  4302. amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
  4303. WRITE_DATA_DST_SEL(0)));
  4304. amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
  4305. amdgpu_ring_write(ring, 0);
  4306. amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
  4307. }
  4308. static int gfx_v7_0_early_init(void *handle)
  4309. {
  4310. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4311. adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
  4312. adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
  4313. gfx_v7_0_set_ring_funcs(adev);
  4314. gfx_v7_0_set_irq_funcs(adev);
  4315. gfx_v7_0_set_gds_init(adev);
  4316. return 0;
  4317. }
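/*
 * gfx_v7_0_sw_init - software init for the gfx block
 *
 * Registers the EOP and privileged reg/instruction interrupt sources,
 * loads the microcode, allocates the RLC and MEC buffers, creates the gfx
 * rings and the compute rings (MEC1, 8 queues per pipe), and reserves the
 * GDS, GWS and OA buffer objects for the gfx partition.
 */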
  4318. static int gfx_v7_0_sw_init(void *handle)
  4319. {
  4320. struct amdgpu_ring *ring;
  4321. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4322. int i, r;
  4323. /* EOP Event */
  4324. r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
  4325. if (r)
  4326. return r;
  4327. /* Privileged reg */
  4328. r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
  4329. if (r)
  4330. return r;
  4331. /* Privileged inst */
  4332. r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
  4333. if (r)
  4334. return r;
  4335. gfx_v7_0_scratch_init(adev);
  4336. r = gfx_v7_0_init_microcode(adev);
  4337. if (r) {
  4338. DRM_ERROR("Failed to load gfx firmware!\n");
  4339. return r;
  4340. }
  4341. r = gfx_v7_0_rlc_init(adev);
  4342. if (r) {
  4343. DRM_ERROR("Failed to init rlc BOs!\n");
  4344. return r;
  4345. }
  4346. /* allocate mec buffers */
  4347. r = gfx_v7_0_mec_init(adev);
  4348. if (r) {
  4349. DRM_ERROR("Failed to init MEC BOs!\n");
  4350. return r;
  4351. }
  4352. for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
  4353. ring = &adev->gfx.gfx_ring[i];
  4354. ring->ring_obj = NULL;
  4355. sprintf(ring->name, "gfx");
  4356. r = amdgpu_ring_init(adev, ring, 1024 * 1024,
  4357. PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
  4358. &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
  4359. AMDGPU_RING_TYPE_GFX);
  4360. if (r)
  4361. return r;
  4362. }
  4363. /* set up the compute queues */
  4364. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4365. unsigned irq_type;
  4366. /* max 32 queues per MEC */
  4367. if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
  4368. DRM_ERROR("Too many (%d) compute rings!\n", i);
  4369. break;
  4370. }
  4371. ring = &adev->gfx.compute_ring[i];
  4372. ring->ring_obj = NULL;
  4373. ring->use_doorbell = true;
  4374. ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
  4375. ring->me = 1; /* first MEC */
  4376. ring->pipe = i / 8;
  4377. ring->queue = i % 8;
  4378. sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
  4379. irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
  4380. /* type-2 packets are deprecated on MEC, use type-3 instead */
  4381. r = amdgpu_ring_init(adev, ring, 1024 * 1024,
  4382. PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
  4383. &adev->gfx.eop_irq, irq_type,
  4384. AMDGPU_RING_TYPE_COMPUTE);
  4385. if (r)
  4386. return r;
  4387. }
  4388. /* reserve GDS, GWS and OA resource for gfx */
  4389. r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
  4390. PAGE_SIZE, true,
  4391. AMDGPU_GEM_DOMAIN_GDS, 0,
  4392. NULL, NULL, &adev->gds.gds_gfx_bo);
  4393. if (r)
  4394. return r;
  4395. r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
  4396. PAGE_SIZE, true,
  4397. AMDGPU_GEM_DOMAIN_GWS, 0,
  4398. NULL, NULL, &adev->gds.gws_gfx_bo);
  4399. if (r)
  4400. return r;
  4401. r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
  4402. PAGE_SIZE, true,
  4403. AMDGPU_GEM_DOMAIN_OA, 0,
  4404. NULL, NULL, &adev->gds.oa_gfx_bo);
  4405. if (r)
  4406. return r;
  4407. return r;
  4408. }
  4409. static int gfx_v7_0_sw_fini(void *handle)
  4410. {
  4411. int i;
  4412. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4413. amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
  4414. amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
  4415. amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
  4416. for (i = 0; i < adev->gfx.num_gfx_rings; i++)
  4417. amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
  4418. for (i = 0; i < adev->gfx.num_compute_rings; i++)
  4419. amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
  4420. gfx_v7_0_cp_compute_fini(adev);
  4421. gfx_v7_0_rlc_fini(adev);
  4422. gfx_v7_0_mec_fini(adev);
  4423. return 0;
  4424. }
  4425. static int gfx_v7_0_hw_init(void *handle)
  4426. {
  4427. int r;
  4428. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4429. gfx_v7_0_gpu_init(adev);
  4430. /* init rlc */
  4431. r = gfx_v7_0_rlc_resume(adev);
  4432. if (r)
  4433. return r;
  4434. r = gfx_v7_0_cp_resume(adev);
  4435. if (r)
  4436. return r;
  4437. adev->gfx.ce_ram_size = 0x8000;
  4438. return r;
  4439. }
  4440. static int gfx_v7_0_hw_fini(void *handle)
  4441. {
  4442. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4443. gfx_v7_0_cp_enable(adev, false);
  4444. gfx_v7_0_rlc_stop(adev);
  4445. gfx_v7_0_fini_pg(adev);
  4446. return 0;
  4447. }
  4448. static int gfx_v7_0_suspend(void *handle)
  4449. {
  4450. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4451. return gfx_v7_0_hw_fini(adev);
  4452. }
  4453. static int gfx_v7_0_resume(void *handle)
  4454. {
  4455. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4456. return gfx_v7_0_hw_init(adev);
  4457. }
  4458. static bool gfx_v7_0_is_idle(void *handle)
  4459. {
  4460. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4461. if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
  4462. return false;
  4463. else
  4464. return true;
  4465. }
  4466. static int gfx_v7_0_wait_for_idle(void *handle)
  4467. {
  4468. unsigned i;
  4469. u32 tmp;
  4470. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4471. for (i = 0; i < adev->usec_timeout; i++) {
4472. /* read GRBM_STATUS */
  4473. tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
  4474. if (!tmp)
  4475. return 0;
  4476. udelay(1);
  4477. }
  4478. return -ETIMEDOUT;
  4479. }
  4480. static void gfx_v7_0_print_status(void *handle)
  4481. {
  4482. int i;
  4483. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4484. dev_info(adev->dev, "GFX 7.x registers\n");
  4485. dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
  4486. RREG32(mmGRBM_STATUS));
  4487. dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
  4488. RREG32(mmGRBM_STATUS2));
  4489. dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
  4490. RREG32(mmGRBM_STATUS_SE0));
  4491. dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
  4492. RREG32(mmGRBM_STATUS_SE1));
  4493. dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
  4494. RREG32(mmGRBM_STATUS_SE2));
  4495. dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
  4496. RREG32(mmGRBM_STATUS_SE3));
  4497. dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
  4498. dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
  4499. RREG32(mmCP_STALLED_STAT1));
  4500. dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
  4501. RREG32(mmCP_STALLED_STAT2));
  4502. dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
  4503. RREG32(mmCP_STALLED_STAT3));
  4504. dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
  4505. RREG32(mmCP_CPF_BUSY_STAT));
  4506. dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
  4507. RREG32(mmCP_CPF_STALLED_STAT1));
  4508. dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
  4509. dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
  4510. dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
  4511. RREG32(mmCP_CPC_STALLED_STAT1));
  4512. dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
  4513. for (i = 0; i < 32; i++) {
  4514. dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
  4515. i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
  4516. }
  4517. for (i = 0; i < 16; i++) {
  4518. dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
  4519. i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
  4520. }
  4521. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  4522. dev_info(adev->dev, " se: %d\n", i);
  4523. gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
  4524. dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
  4525. RREG32(mmPA_SC_RASTER_CONFIG));
  4526. dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
  4527. RREG32(mmPA_SC_RASTER_CONFIG_1));
  4528. }
  4529. gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
  4530. dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
  4531. RREG32(mmGB_ADDR_CONFIG));
  4532. dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
  4533. RREG32(mmHDP_ADDR_CONFIG));
  4534. dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
  4535. RREG32(mmDMIF_ADDR_CALC));
  4536. dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n",
  4537. RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
  4538. dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n",
  4539. RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
  4540. dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
  4541. RREG32(mmUVD_UDEC_ADDR_CONFIG));
  4542. dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
  4543. RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
  4544. dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
  4545. RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
  4546. dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
  4547. RREG32(mmCP_MEQ_THRESHOLDS));
  4548. dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
  4549. RREG32(mmSX_DEBUG_1));
  4550. dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
  4551. RREG32(mmTA_CNTL_AUX));
  4552. dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
  4553. RREG32(mmSPI_CONFIG_CNTL));
  4554. dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
  4555. RREG32(mmSQ_CONFIG));
  4556. dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
  4557. RREG32(mmDB_DEBUG));
  4558. dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
  4559. RREG32(mmDB_DEBUG2));
  4560. dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
  4561. RREG32(mmDB_DEBUG3));
  4562. dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
  4563. RREG32(mmCB_HW_CONTROL));
  4564. dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
  4565. RREG32(mmSPI_CONFIG_CNTL_1));
  4566. dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
  4567. RREG32(mmPA_SC_FIFO_SIZE));
  4568. dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
  4569. RREG32(mmVGT_NUM_INSTANCES));
  4570. dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
  4571. RREG32(mmCP_PERFMON_CNTL));
  4572. dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
  4573. RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
  4574. dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
  4575. RREG32(mmVGT_CACHE_INVALIDATION));
  4576. dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
  4577. RREG32(mmVGT_GS_VERTEX_REUSE));
  4578. dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
  4579. RREG32(mmPA_SC_LINE_STIPPLE_STATE));
  4580. dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
  4581. RREG32(mmPA_CL_ENHANCE));
  4582. dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
  4583. RREG32(mmPA_SC_ENHANCE));
  4584. dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
  4585. RREG32(mmCP_ME_CNTL));
  4586. dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
  4587. RREG32(mmCP_MAX_CONTEXT));
  4588. dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
  4589. RREG32(mmCP_ENDIAN_SWAP));
  4590. dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
  4591. RREG32(mmCP_DEVICE_ID));
  4592. dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
  4593. RREG32(mmCP_SEM_WAIT_TIMER));
  4594. if (adev->asic_type != CHIP_HAWAII)
  4595. dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
  4596. RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
  4597. dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
  4598. RREG32(mmCP_RB_WPTR_DELAY));
  4599. dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
  4600. RREG32(mmCP_RB_VMID));
  4601. dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
  4602. RREG32(mmCP_RB0_CNTL));
  4603. dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
  4604. RREG32(mmCP_RB0_WPTR));
  4605. dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
  4606. RREG32(mmCP_RB0_RPTR_ADDR));
  4607. dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
  4608. RREG32(mmCP_RB0_RPTR_ADDR_HI));
  4609. dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
  4610. RREG32(mmCP_RB0_CNTL));
  4611. dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
  4612. RREG32(mmCP_RB0_BASE));
  4613. dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
  4614. RREG32(mmCP_RB0_BASE_HI));
  4615. dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
  4616. RREG32(mmCP_MEC_CNTL));
  4617. dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
  4618. RREG32(mmCP_CPF_DEBUG));
  4619. dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
  4620. RREG32(mmSCRATCH_ADDR));
  4621. dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
  4622. RREG32(mmSCRATCH_UMSK));
4623. /* dump the compute pipe and queue state */
  4624. mutex_lock(&adev->srbm_mutex);
  4625. for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
  4626. int me = (i < 4) ? 1 : 2;
  4627. int pipe = (i < 4) ? i : (i - 4);
  4628. int queue;
  4629. dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
  4630. cik_srbm_select(adev, me, pipe, 0, 0);
  4631. dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
  4632. RREG32(mmCP_HPD_EOP_BASE_ADDR));
  4633. dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
  4634. RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
  4635. dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
  4636. RREG32(mmCP_HPD_EOP_VMID));
  4637. dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
  4638. RREG32(mmCP_HPD_EOP_CONTROL));
  4639. for (queue = 0; queue < 8; queue++) {
  4640. cik_srbm_select(adev, me, pipe, queue, 0);
  4641. dev_info(adev->dev, " queue: %d\n", queue);
  4642. dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
  4643. RREG32(mmCP_PQ_WPTR_POLL_CNTL));
  4644. dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
  4645. RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
  4646. dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
  4647. RREG32(mmCP_HQD_ACTIVE));
  4648. dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
  4649. RREG32(mmCP_HQD_DEQUEUE_REQUEST));
  4650. dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
  4651. RREG32(mmCP_HQD_PQ_RPTR));
  4652. dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
  4653. RREG32(mmCP_HQD_PQ_WPTR));
  4654. dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
  4655. RREG32(mmCP_HQD_PQ_BASE));
  4656. dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
  4657. RREG32(mmCP_HQD_PQ_BASE_HI));
  4658. dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
  4659. RREG32(mmCP_HQD_PQ_CONTROL));
  4660. dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
  4661. RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
  4662. dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
  4663. RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
  4664. dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
  4665. RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
  4666. dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
  4667. RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
  4668. dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
  4669. RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
  4670. dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
  4671. RREG32(mmCP_HQD_PQ_WPTR));
  4672. dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
  4673. RREG32(mmCP_HQD_VMID));
  4674. dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
  4675. RREG32(mmCP_MQD_BASE_ADDR));
  4676. dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
  4677. RREG32(mmCP_MQD_BASE_ADDR_HI));
  4678. dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
  4679. RREG32(mmCP_MQD_CONTROL));
  4680. }
  4681. }
  4682. cik_srbm_select(adev, 0, 0, 0, 0);
  4683. mutex_unlock(&adev->srbm_mutex);
  4684. dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
  4685. RREG32(mmCP_INT_CNTL_RING0));
  4686. dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
  4687. RREG32(mmRLC_LB_CNTL));
  4688. dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
  4689. RREG32(mmRLC_CNTL));
  4690. dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
  4691. RREG32(mmRLC_CGCG_CGLS_CTRL));
  4692. dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
  4693. RREG32(mmRLC_LB_CNTR_INIT));
  4694. dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
  4695. RREG32(mmRLC_LB_CNTR_MAX));
  4696. dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
  4697. RREG32(mmRLC_LB_INIT_CU_MASK));
  4698. dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
  4699. RREG32(mmRLC_LB_PARAMS));
  4700. dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
  4701. RREG32(mmRLC_LB_CNTL));
  4702. dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
  4703. RREG32(mmRLC_MC_CNTL));
  4704. dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
  4705. RREG32(mmRLC_UCODE_CNTL));
  4706. if (adev->asic_type == CHIP_BONAIRE)
  4707. dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
  4708. RREG32(mmRLC_DRIVER_CPDMA_STATUS));
  4709. mutex_lock(&adev->srbm_mutex);
  4710. for (i = 0; i < 16; i++) {
  4711. cik_srbm_select(adev, 0, 0, 0, i);
  4712. dev_info(adev->dev, " VM %d:\n", i);
  4713. dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
  4714. RREG32(mmSH_MEM_CONFIG));
  4715. dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
  4716. RREG32(mmSH_MEM_APE1_BASE));
  4717. dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
  4718. RREG32(mmSH_MEM_APE1_LIMIT));
  4719. dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
  4720. RREG32(mmSH_MEM_BASES));
  4721. }
  4722. cik_srbm_select(adev, 0, 0, 0, 0);
  4723. mutex_unlock(&adev->srbm_mutex);
  4724. }
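/*
 * gfx_v7_0_soft_reset - soft reset the gfx block
 *
 * Builds GRBM/SRBM reset masks from the busy bits in GRBM_STATUS,
 * GRBM_STATUS2 and SRBM_STATUS; if a reset is needed, CG/PG are disabled,
 * the RLC, ME/PFP/CE and MEC engines are halted, the soft reset bits are
 * pulsed and the hardware is given ~50us to settle.
 */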
  4725. static int gfx_v7_0_soft_reset(void *handle)
  4726. {
  4727. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4728. u32 tmp;
  4729. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4730. /* GRBM_STATUS */
  4731. tmp = RREG32(mmGRBM_STATUS);
  4732. if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
  4733. GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
  4734. GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
  4735. GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
  4736. GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
  4737. GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
  4738. grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
  4739. GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
  4740. if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
  4741. grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
  4742. srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
  4743. }
  4744. /* GRBM_STATUS2 */
  4745. tmp = RREG32(mmGRBM_STATUS2);
  4746. if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
  4747. grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
  4748. /* SRBM_STATUS */
  4749. tmp = RREG32(mmSRBM_STATUS);
  4750. if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
  4751. srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
  4752. if (grbm_soft_reset || srbm_soft_reset) {
  4753. gfx_v7_0_print_status((void *)adev);
  4754. /* disable CG/PG */
  4755. gfx_v7_0_fini_pg(adev);
  4756. gfx_v7_0_update_cg(adev, false);
  4757. /* stop the rlc */
  4758. gfx_v7_0_rlc_stop(adev);
  4759. /* Disable GFX parsing/prefetching */
  4760. WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
  4761. /* Disable MEC parsing/prefetching */
  4762. WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
  4763. if (grbm_soft_reset) {
  4764. tmp = RREG32(mmGRBM_SOFT_RESET);
  4765. tmp |= grbm_soft_reset;
  4766. dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
  4767. WREG32(mmGRBM_SOFT_RESET, tmp);
  4768. tmp = RREG32(mmGRBM_SOFT_RESET);
  4769. udelay(50);
  4770. tmp &= ~grbm_soft_reset;
  4771. WREG32(mmGRBM_SOFT_RESET, tmp);
  4772. tmp = RREG32(mmGRBM_SOFT_RESET);
  4773. }
  4774. if (srbm_soft_reset) {
  4775. tmp = RREG32(mmSRBM_SOFT_RESET);
  4776. tmp |= srbm_soft_reset;
  4777. dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
  4778. WREG32(mmSRBM_SOFT_RESET, tmp);
  4779. tmp = RREG32(mmSRBM_SOFT_RESET);
  4780. udelay(50);
  4781. tmp &= ~srbm_soft_reset;
  4782. WREG32(mmSRBM_SOFT_RESET, tmp);
  4783. tmp = RREG32(mmSRBM_SOFT_RESET);
  4784. }
  4785. /* Wait a little for things to settle down */
  4786. udelay(50);
  4787. gfx_v7_0_print_status((void *)adev);
  4788. }
  4789. return 0;
  4790. }
  4791. static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
  4792. enum amdgpu_interrupt_state state)
  4793. {
  4794. u32 cp_int_cntl;
  4795. switch (state) {
  4796. case AMDGPU_IRQ_STATE_DISABLE:
  4797. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4798. cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
  4799. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4800. break;
  4801. case AMDGPU_IRQ_STATE_ENABLE:
  4802. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4803. cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
  4804. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4805. break;
  4806. default:
  4807. break;
  4808. }
  4809. }
  4810. static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
  4811. int me, int pipe,
  4812. enum amdgpu_interrupt_state state)
  4813. {
  4814. u32 mec_int_cntl, mec_int_cntl_reg;
  4815. /*
  4816. * amdgpu controls only pipe 0 of MEC1. That's why this function only
  4817. * handles the setting of interrupts for this specific pipe. All other
  4818. * pipes' interrupts are set by amdkfd.
  4819. */
  4820. if (me == 1) {
  4821. switch (pipe) {
  4822. case 0:
  4823. mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
  4824. break;
  4825. default:
  4826. DRM_DEBUG("invalid pipe %d\n", pipe);
  4827. return;
  4828. }
  4829. } else {
  4830. DRM_DEBUG("invalid me %d\n", me);
  4831. return;
  4832. }
  4833. switch (state) {
  4834. case AMDGPU_IRQ_STATE_DISABLE:
  4835. mec_int_cntl = RREG32(mec_int_cntl_reg);
  4836. mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
  4837. WREG32(mec_int_cntl_reg, mec_int_cntl);
  4838. break;
  4839. case AMDGPU_IRQ_STATE_ENABLE:
  4840. mec_int_cntl = RREG32(mec_int_cntl_reg);
  4841. mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
  4842. WREG32(mec_int_cntl_reg, mec_int_cntl);
  4843. break;
  4844. default:
  4845. break;
  4846. }
  4847. }
  4848. static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
  4849. struct amdgpu_irq_src *src,
  4850. unsigned type,
  4851. enum amdgpu_interrupt_state state)
  4852. {
  4853. u32 cp_int_cntl;
  4854. switch (state) {
  4855. case AMDGPU_IRQ_STATE_DISABLE:
  4856. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4857. cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
  4858. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4859. break;
  4860. case AMDGPU_IRQ_STATE_ENABLE:
  4861. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4862. cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
  4863. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4864. break;
  4865. default:
  4866. break;
  4867. }
  4868. return 0;
  4869. }
  4870. static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
  4871. struct amdgpu_irq_src *src,
  4872. unsigned type,
  4873. enum amdgpu_interrupt_state state)
  4874. {
  4875. u32 cp_int_cntl;
  4876. switch (state) {
  4877. case AMDGPU_IRQ_STATE_DISABLE:
  4878. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4879. cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
  4880. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4881. break;
  4882. case AMDGPU_IRQ_STATE_ENABLE:
  4883. cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
  4884. cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
  4885. WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
  4886. break;
  4887. default:
  4888. break;
  4889. }
  4890. return 0;
  4891. }
  4892. static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
  4893. struct amdgpu_irq_src *src,
  4894. unsigned type,
  4895. enum amdgpu_interrupt_state state)
  4896. {
  4897. switch (type) {
  4898. case AMDGPU_CP_IRQ_GFX_EOP:
  4899. gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
  4900. break;
  4901. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
  4902. gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
  4903. break;
  4904. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
  4905. gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
  4906. break;
  4907. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
  4908. gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
  4909. break;
  4910. case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
  4911. gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
  4912. break;
  4913. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
  4914. gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
  4915. break;
  4916. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
  4917. gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
  4918. break;
  4919. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
  4920. gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
  4921. break;
  4922. case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
  4923. gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
  4924. break;
  4925. default:
  4926. break;
  4927. }
  4928. return 0;
  4929. }
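/*
 * gfx_v7_0_eop_irq - handle end-of-pipe interrupts
 *
 * Decodes me/pipe from the IV ring_id: ME0 fences go to the single gfx
 * ring, while ME1/ME2 fences are matched against the compute rings by
 * me and pipe before the fence is processed.
 */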
  4930. static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
  4931. struct amdgpu_irq_src *source,
  4932. struct amdgpu_iv_entry *entry)
  4933. {
  4934. u8 me_id, pipe_id;
  4935. struct amdgpu_ring *ring;
  4936. int i;
  4937. DRM_DEBUG("IH: CP EOP\n");
  4938. me_id = (entry->ring_id & 0x0c) >> 2;
  4939. pipe_id = (entry->ring_id & 0x03) >> 0;
  4940. switch (me_id) {
  4941. case 0:
  4942. amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
  4943. break;
  4944. case 1:
  4945. case 2:
  4946. for (i = 0; i < adev->gfx.num_compute_rings; i++) {
  4947. ring = &adev->gfx.compute_ring[i];
4948. if ((ring->me == me_id) && (ring->pipe == pipe_id))
  4949. amdgpu_fence_process(ring);
  4950. }
  4951. break;
  4952. }
  4953. return 0;
  4954. }
  4955. static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
  4956. struct amdgpu_irq_src *source,
  4957. struct amdgpu_iv_entry *entry)
  4958. {
  4959. DRM_ERROR("Illegal register access in command stream\n");
  4960. schedule_work(&adev->reset_work);
  4961. return 0;
  4962. }
  4963. static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
  4964. struct amdgpu_irq_src *source,
  4965. struct amdgpu_iv_entry *entry)
  4966. {
  4967. DRM_ERROR("Illegal instruction in command stream\n");
  4968. // XXX soft reset the gfx block only
  4969. schedule_work(&adev->reset_work);
  4970. return 0;
  4971. }
  4972. static int gfx_v7_0_set_clockgating_state(void *handle,
  4973. enum amd_clockgating_state state)
  4974. {
  4975. bool gate = false;
  4976. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4977. if (state == AMD_CG_STATE_GATE)
  4978. gate = true;
  4979. gfx_v7_0_enable_gui_idle_interrupt(adev, false);
  4980. /* order matters! */
  4981. if (gate) {
  4982. gfx_v7_0_enable_mgcg(adev, true);
  4983. gfx_v7_0_enable_cgcg(adev, true);
  4984. } else {
  4985. gfx_v7_0_enable_cgcg(adev, false);
  4986. gfx_v7_0_enable_mgcg(adev, false);
  4987. }
  4988. gfx_v7_0_enable_gui_idle_interrupt(adev, true);
  4989. return 0;
  4990. }
  4991. static int gfx_v7_0_set_powergating_state(void *handle,
  4992. enum amd_powergating_state state)
  4993. {
  4994. bool gate = false;
  4995. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  4996. if (state == AMD_PG_STATE_GATE)
  4997. gate = true;
  4998. if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
  4999. AMDGPU_PG_SUPPORT_GFX_SMG |
  5000. AMDGPU_PG_SUPPORT_GFX_DMG |
  5001. AMDGPU_PG_SUPPORT_CP |
  5002. AMDGPU_PG_SUPPORT_GDS |
  5003. AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
  5004. gfx_v7_0_update_gfx_pg(adev, gate);
  5005. if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
  5006. gfx_v7_0_enable_cp_pg(adev, gate);
  5007. gfx_v7_0_enable_gds_pg(adev, gate);
  5008. }
  5009. }
  5010. return 0;
  5011. }
  5012. const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
  5013. .early_init = gfx_v7_0_early_init,
  5014. .late_init = NULL,
  5015. .sw_init = gfx_v7_0_sw_init,
  5016. .sw_fini = gfx_v7_0_sw_fini,
  5017. .hw_init = gfx_v7_0_hw_init,
  5018. .hw_fini = gfx_v7_0_hw_fini,
  5019. .suspend = gfx_v7_0_suspend,
  5020. .resume = gfx_v7_0_resume,
  5021. .is_idle = gfx_v7_0_is_idle,
  5022. .wait_for_idle = gfx_v7_0_wait_for_idle,
  5023. .soft_reset = gfx_v7_0_soft_reset,
  5024. .print_status = gfx_v7_0_print_status,
  5025. .set_clockgating_state = gfx_v7_0_set_clockgating_state,
  5026. .set_powergating_state = gfx_v7_0_set_powergating_state,
  5027. };
  5028. static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
  5029. .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
  5030. .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
  5031. .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
  5032. .parse_cs = NULL,
  5033. .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
  5034. .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
  5035. .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
  5036. .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
  5037. .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
  5038. .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
  5039. .test_ring = gfx_v7_0_ring_test_ring,
  5040. .test_ib = gfx_v7_0_ring_test_ib,
  5041. .insert_nop = amdgpu_ring_insert_nop,
  5042. };
  5043. static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
  5044. .get_rptr = gfx_v7_0_ring_get_rptr_compute,
  5045. .get_wptr = gfx_v7_0_ring_get_wptr_compute,
  5046. .set_wptr = gfx_v7_0_ring_set_wptr_compute,
  5047. .parse_cs = NULL,
  5048. .emit_ib = gfx_v7_0_ring_emit_ib_compute,
  5049. .emit_fence = gfx_v7_0_ring_emit_fence_compute,
  5050. .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
  5051. .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
  5052. .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
  5053. .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
  5054. .test_ring = gfx_v7_0_ring_test_ring,
  5055. .test_ib = gfx_v7_0_ring_test_ib,
  5056. .insert_nop = amdgpu_ring_insert_nop,
  5057. };
  5058. static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
  5059. {
  5060. int i;
  5061. for (i = 0; i < adev->gfx.num_gfx_rings; i++)
  5062. adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
  5063. for (i = 0; i < adev->gfx.num_compute_rings; i++)
  5064. adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
  5065. }
  5066. static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
  5067. .set = gfx_v7_0_set_eop_interrupt_state,
  5068. .process = gfx_v7_0_eop_irq,
  5069. };
  5070. static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
  5071. .set = gfx_v7_0_set_priv_reg_fault_state,
  5072. .process = gfx_v7_0_priv_reg_irq,
  5073. };
  5074. static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
  5075. .set = gfx_v7_0_set_priv_inst_fault_state,
  5076. .process = gfx_v7_0_priv_inst_irq,
  5077. };
  5078. static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
  5079. {
  5080. adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
  5081. adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
  5082. adev->gfx.priv_reg_irq.num_types = 1;
  5083. adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
  5084. adev->gfx.priv_inst_irq.num_types = 1;
  5085. adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
  5086. }
  5087. static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
  5088. {
5089. /* init asic gds info */
  5090. adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
  5091. adev->gds.gws.total_size = 64;
  5092. adev->gds.oa.total_size = 16;
  5093. if (adev->gds.mem.total_size == 64 * 1024) {
  5094. adev->gds.mem.gfx_partition_size = 4096;
  5095. adev->gds.mem.cs_partition_size = 4096;
  5096. adev->gds.gws.gfx_partition_size = 4;
  5097. adev->gds.gws.cs_partition_size = 4;
  5098. adev->gds.oa.gfx_partition_size = 4;
  5099. adev->gds.oa.cs_partition_size = 1;
  5100. } else {
  5101. adev->gds.mem.gfx_partition_size = 1024;
  5102. adev->gds.mem.cs_partition_size = 1024;
  5103. adev->gds.gws.gfx_partition_size = 16;
  5104. adev->gds.gws.cs_partition_size = 16;
  5105. adev->gds.oa.gfx_partition_size = 4;
  5106. adev->gds.oa.cs_partition_size = 4;
  5107. }
  5108. }
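/*
 * gfx_v7_0_get_cu_info - gather the per-SE/SH CU bitmaps
 *
 * Fills cu_info->bitmap for every shader engine/array, counts the active
 * CUs and marks up to two CUs per SH as always-on in ao_cu_mask
 * (packed as 8 bits per SH and 16 bits per SE).
 */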
  5109. int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
  5110. struct amdgpu_cu_info *cu_info)
  5111. {
  5112. int i, j, k, counter, active_cu_number = 0;
  5113. u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
  5114. if (!adev || !cu_info)
  5115. return -EINVAL;
  5116. mutex_lock(&adev->grbm_idx_mutex);
  5117. for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
  5118. for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
  5119. mask = 1;
  5120. ao_bitmap = 0;
  5121. counter = 0;
  5122. bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
  5123. cu_info->bitmap[i][j] = bitmap;
  5124. for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
  5125. if (bitmap & mask) {
  5126. if (counter < 2)
  5127. ao_bitmap |= mask;
  5128. counter ++;
  5129. }
  5130. mask <<= 1;
  5131. }
  5132. active_cu_number += counter;
  5133. ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
  5134. }
  5135. }
  5136. cu_info->number = active_cu_number;
  5137. cu_info->ao_cu_mask = ao_cu_mask;
  5138. mutex_unlock(&adev->grbm_idx_mutex);
  5139. return 0;
  5140. }