/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
#include "radeon_kfd.h"
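
/*
 * Both the legacy ALL-CAPS and the newer lowercase CIK microcode image
 * names are declared below, so firmware-packaging tools (modinfo,
 * initramfs generators) pick up whichever naming scheme is installed.
 */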
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
MODULE_FIRMWARE("radeon/bonaire_ce.bin");
MODULE_FIRMWARE("radeon/bonaire_mec.bin");
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
MODULE_FIRMWARE("radeon/hawaii_me.bin");
MODULE_FIRMWARE("radeon/hawaii_ce.bin");
MODULE_FIRMWARE("radeon/hawaii_mec.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
MODULE_FIRMWARE("radeon/kaveri_me.bin");
MODULE_FIRMWARE("radeon/kaveri_ce.bin");
MODULE_FIRMWARE("radeon/kaveri_mec.bin");
MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_pfp.bin");
MODULE_FIRMWARE("radeon/kabini_me.bin");
MODULE_FIRMWARE("radeon/kabini_ce.bin");
MODULE_FIRMWARE("radeon/kabini_mec.bin");
MODULE_FIRMWARE("radeon/kabini_rlc.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_pfp.bin");
MODULE_FIRMWARE("radeon/mullins_me.bin");
MODULE_FIRMWARE("radeon/mullins_ce.bin");
MODULE_FIRMWARE("radeon/mullins_mec.bin");
MODULE_FIRMWARE("radeon/mullins_rlc.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");

extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_fini_pg(struct radeon_device *rdev);
static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);

/**
 * cik_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cik_get_allowed_info_register(struct radeon_device *rdev,
				  u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case GRBM_STATUS_SE2:
	case GRBM_STATUS_SE3:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET):
	case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET):
	case UVD_STATUS:
	/* TODO VCE */
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors
 */
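/*
 * The DIDT block is reached through an index/data register pair: the
 * register offset is written to CIK_DIDT_IND_INDEX and the value then
 * moves through CIK_DIDT_IND_DATA.  didt_idx_lock keeps that two-step
 * sequence from being interleaved with another DIDT access.
 */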
u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
	WREG32(CIK_DIDT_IND_INDEX, (reg));
	r = RREG32(CIK_DIDT_IND_DATA);
	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
	return r;
}

void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
	WREG32(CIK_DIDT_IND_INDEX, (reg));
	WREG32(CIK_DIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}

/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/*
 * Indirect register accessors
 */
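/*
 * PCIE port registers are likewise reached through an index/data pair
 * (PCIE_INDEX/PCIE_DATA); the dummy reads after each write act as
 * posting reads so the write has landed before the sequence continues.
 */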
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
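
/*
 * Register list used to set up the RLC save/restore buffer on Spectre
 * (Kaveri): each entry is a (select << 16) | (register dword offset)
 * word followed by a 0x00000000 placeholder slot.
 */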
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
  672. 0x00000000,
  673. (0x0e00 << 16) | (0x8904 >> 2),
  674. 0x00000000,
  675. 0x5,
  676. (0x0e00 << 16) | (0x8c28 >> 2),
  677. (0x0e00 << 16) | (0x8c2c >> 2),
  678. (0x0e00 << 16) | (0x8c30 >> 2),
  679. (0x0e00 << 16) | (0x8c34 >> 2),
  680. (0x0e00 << 16) | (0x9600 >> 2),
  681. };
  682. static const u32 kalindi_rlc_save_restore_register_list[] =
  683. {
  684. (0x0e00 << 16) | (0xc12c >> 2),
  685. 0x00000000,
  686. (0x0e00 << 16) | (0xc140 >> 2),
  687. 0x00000000,
  688. (0x0e00 << 16) | (0xc150 >> 2),
  689. 0x00000000,
  690. (0x0e00 << 16) | (0xc15c >> 2),
  691. 0x00000000,
  692. (0x0e00 << 16) | (0xc168 >> 2),
  693. 0x00000000,
  694. (0x0e00 << 16) | (0xc170 >> 2),
  695. 0x00000000,
  696. (0x0e00 << 16) | (0xc204 >> 2),
  697. 0x00000000,
  698. (0x0e00 << 16) | (0xc2b4 >> 2),
  699. 0x00000000,
  700. (0x0e00 << 16) | (0xc2b8 >> 2),
  701. 0x00000000,
  702. (0x0e00 << 16) | (0xc2bc >> 2),
  703. 0x00000000,
  704. (0x0e00 << 16) | (0xc2c0 >> 2),
  705. 0x00000000,
  706. (0x0e00 << 16) | (0x8228 >> 2),
  707. 0x00000000,
  708. (0x0e00 << 16) | (0x829c >> 2),
  709. 0x00000000,
  710. (0x0e00 << 16) | (0x869c >> 2),
  711. 0x00000000,
  712. (0x0600 << 16) | (0x98f4 >> 2),
  713. 0x00000000,
  714. (0x0e00 << 16) | (0x98f8 >> 2),
  715. 0x00000000,
  716. (0x0e00 << 16) | (0x9900 >> 2),
  717. 0x00000000,
  718. (0x0e00 << 16) | (0xc260 >> 2),
  719. 0x00000000,
  720. (0x0e00 << 16) | (0x90e8 >> 2),
  721. 0x00000000,
  722. (0x0e00 << 16) | (0x3c000 >> 2),
  723. 0x00000000,
  724. (0x0e00 << 16) | (0x3c00c >> 2),
  725. 0x00000000,
  726. (0x0e00 << 16) | (0x8c1c >> 2),
  727. 0x00000000,
  728. (0x0e00 << 16) | (0x9700 >> 2),
  729. 0x00000000,
  730. (0x0e00 << 16) | (0xcd20 >> 2),
  731. 0x00000000,
  732. (0x4e00 << 16) | (0xcd20 >> 2),
  733. 0x00000000,
  734. (0x5e00 << 16) | (0xcd20 >> 2),
  735. 0x00000000,
  736. (0x6e00 << 16) | (0xcd20 >> 2),
  737. 0x00000000,
  738. (0x7e00 << 16) | (0xcd20 >> 2),
  739. 0x00000000,
  740. (0x0e00 << 16) | (0x89bc >> 2),
  741. 0x00000000,
  742. (0x0e00 << 16) | (0x8900 >> 2),
  743. 0x00000000,
  744. 0x3,
  745. (0x0e00 << 16) | (0xc130 >> 2),
  746. 0x00000000,
  747. (0x0e00 << 16) | (0xc134 >> 2),
  748. 0x00000000,
  749. (0x0e00 << 16) | (0xc1fc >> 2),
  750. 0x00000000,
  751. (0x0e00 << 16) | (0xc208 >> 2),
  752. 0x00000000,
  753. (0x0e00 << 16) | (0xc264 >> 2),
  754. 0x00000000,
  755. (0x0e00 << 16) | (0xc268 >> 2),
  756. 0x00000000,
  757. (0x0e00 << 16) | (0xc26c >> 2),
  758. 0x00000000,
  759. (0x0e00 << 16) | (0xc270 >> 2),
  760. 0x00000000,
  761. (0x0e00 << 16) | (0xc274 >> 2),
  762. 0x00000000,
  763. (0x0e00 << 16) | (0xc28c >> 2),
  764. 0x00000000,
  765. (0x0e00 << 16) | (0xc290 >> 2),
  766. 0x00000000,
  767. (0x0e00 << 16) | (0xc294 >> 2),
  768. 0x00000000,
  769. (0x0e00 << 16) | (0xc298 >> 2),
  770. 0x00000000,
  771. (0x0e00 << 16) | (0xc2a0 >> 2),
  772. 0x00000000,
  773. (0x0e00 << 16) | (0xc2a4 >> 2),
  774. 0x00000000,
  775. (0x0e00 << 16) | (0xc2a8 >> 2),
  776. 0x00000000,
  777. (0x0e00 << 16) | (0xc2ac >> 2),
  778. 0x00000000,
  779. (0x0e00 << 16) | (0x301d0 >> 2),
  780. 0x00000000,
  781. (0x0e00 << 16) | (0x30238 >> 2),
  782. 0x00000000,
  783. (0x0e00 << 16) | (0x30250 >> 2),
  784. 0x00000000,
  785. (0x0e00 << 16) | (0x30254 >> 2),
  786. 0x00000000,
  787. (0x0e00 << 16) | (0x30258 >> 2),
  788. 0x00000000,
  789. (0x0e00 << 16) | (0x3025c >> 2),
  790. 0x00000000,
  791. (0x4e00 << 16) | (0xc900 >> 2),
  792. 0x00000000,
  793. (0x5e00 << 16) | (0xc900 >> 2),
  794. 0x00000000,
  795. (0x6e00 << 16) | (0xc900 >> 2),
  796. 0x00000000,
  797. (0x7e00 << 16) | (0xc900 >> 2),
  798. 0x00000000,
  799. (0x4e00 << 16) | (0xc904 >> 2),
  800. 0x00000000,
  801. (0x5e00 << 16) | (0xc904 >> 2),
  802. 0x00000000,
  803. (0x6e00 << 16) | (0xc904 >> 2),
  804. 0x00000000,
  805. (0x7e00 << 16) | (0xc904 >> 2),
  806. 0x00000000,
  807. (0x4e00 << 16) | (0xc908 >> 2),
  808. 0x00000000,
  809. (0x5e00 << 16) | (0xc908 >> 2),
  810. 0x00000000,
  811. (0x6e00 << 16) | (0xc908 >> 2),
  812. 0x00000000,
  813. (0x7e00 << 16) | (0xc908 >> 2),
  814. 0x00000000,
  815. (0x4e00 << 16) | (0xc90c >> 2),
  816. 0x00000000,
  817. (0x5e00 << 16) | (0xc90c >> 2),
  818. 0x00000000,
  819. (0x6e00 << 16) | (0xc90c >> 2),
  820. 0x00000000,
  821. (0x7e00 << 16) | (0xc90c >> 2),
  822. 0x00000000,
  823. (0x4e00 << 16) | (0xc910 >> 2),
  824. 0x00000000,
  825. (0x5e00 << 16) | (0xc910 >> 2),
  826. 0x00000000,
  827. (0x6e00 << 16) | (0xc910 >> 2),
  828. 0x00000000,
  829. (0x7e00 << 16) | (0xc910 >> 2),
  830. 0x00000000,
  831. (0x0e00 << 16) | (0xc99c >> 2),
  832. 0x00000000,
  833. (0x0e00 << 16) | (0x9834 >> 2),
  834. 0x00000000,
  835. (0x0000 << 16) | (0x30f00 >> 2),
  836. 0x00000000,
  837. (0x0000 << 16) | (0x30f04 >> 2),
  838. 0x00000000,
  839. (0x0000 << 16) | (0x30f08 >> 2),
  840. 0x00000000,
  841. (0x0000 << 16) | (0x30f0c >> 2),
  842. 0x00000000,
  843. (0x0600 << 16) | (0x9b7c >> 2),
  844. 0x00000000,
  845. (0x0e00 << 16) | (0x8a14 >> 2),
  846. 0x00000000,
  847. (0x0e00 << 16) | (0x8a18 >> 2),
  848. 0x00000000,
  849. (0x0600 << 16) | (0x30a00 >> 2),
  850. 0x00000000,
  851. (0x0e00 << 16) | (0x8bf0 >> 2),
  852. 0x00000000,
  853. (0x0e00 << 16) | (0x8bcc >> 2),
  854. 0x00000000,
  855. (0x0e00 << 16) | (0x8b24 >> 2),
  856. 0x00000000,
  857. (0x0e00 << 16) | (0x30a04 >> 2),
  858. 0x00000000,
  859. (0x0600 << 16) | (0x30a10 >> 2),
  860. 0x00000000,
  861. (0x0600 << 16) | (0x30a14 >> 2),
  862. 0x00000000,
  863. (0x0600 << 16) | (0x30a18 >> 2),
  864. 0x00000000,
  865. (0x0600 << 16) | (0x30a2c >> 2),
  866. 0x00000000,
  867. (0x0e00 << 16) | (0xc700 >> 2),
  868. 0x00000000,
  869. (0x0e00 << 16) | (0xc704 >> 2),
  870. 0x00000000,
  871. (0x0e00 << 16) | (0xc708 >> 2),
  872. 0x00000000,
  873. (0x0e00 << 16) | (0xc768 >> 2),
  874. 0x00000000,
  875. (0x0400 << 16) | (0xc770 >> 2),
  876. 0x00000000,
  877. (0x0400 << 16) | (0xc774 >> 2),
  878. 0x00000000,
  879. (0x0400 << 16) | (0xc798 >> 2),
  880. 0x00000000,
  881. (0x0400 << 16) | (0xc79c >> 2),
  882. 0x00000000,
  883. (0x0e00 << 16) | (0x9100 >> 2),
  884. 0x00000000,
  885. (0x0e00 << 16) | (0x3c010 >> 2),
  886. 0x00000000,
  887. (0x0e00 << 16) | (0x8c00 >> 2),
  888. 0x00000000,
  889. (0x0e00 << 16) | (0x8c04 >> 2),
  890. 0x00000000,
  891. (0x0e00 << 16) | (0x8c20 >> 2),
  892. 0x00000000,
  893. (0x0e00 << 16) | (0x8c38 >> 2),
  894. 0x00000000,
  895. (0x0e00 << 16) | (0x8c3c >> 2),
  896. 0x00000000,
  897. (0x0e00 << 16) | (0xae00 >> 2),
  898. 0x00000000,
  899. (0x0e00 << 16) | (0x9604 >> 2),
  900. 0x00000000,
  901. (0x0e00 << 16) | (0xac08 >> 2),
  902. 0x00000000,
  903. (0x0e00 << 16) | (0xac0c >> 2),
  904. 0x00000000,
  905. (0x0e00 << 16) | (0xac10 >> 2),
  906. 0x00000000,
  907. (0x0e00 << 16) | (0xac14 >> 2),
  908. 0x00000000,
  909. (0x0e00 << 16) | (0xac58 >> 2),
  910. 0x00000000,
  911. (0x0e00 << 16) | (0xac68 >> 2),
  912. 0x00000000,
  913. (0x0e00 << 16) | (0xac6c >> 2),
  914. 0x00000000,
  915. (0x0e00 << 16) | (0xac70 >> 2),
  916. 0x00000000,
  917. (0x0e00 << 16) | (0xac74 >> 2),
  918. 0x00000000,
  919. (0x0e00 << 16) | (0xac78 >> 2),
  920. 0x00000000,
  921. (0x0e00 << 16) | (0xac7c >> 2),
  922. 0x00000000,
  923. (0x0e00 << 16) | (0xac80 >> 2),
  924. 0x00000000,
  925. (0x0e00 << 16) | (0xac84 >> 2),
  926. 0x00000000,
  927. (0x0e00 << 16) | (0xac88 >> 2),
  928. 0x00000000,
  929. (0x0e00 << 16) | (0xac8c >> 2),
  930. 0x00000000,
  931. (0x0e00 << 16) | (0x970c >> 2),
  932. 0x00000000,
  933. (0x0e00 << 16) | (0x9714 >> 2),
  934. 0x00000000,
  935. (0x0e00 << 16) | (0x9718 >> 2),
  936. 0x00000000,
  937. (0x0e00 << 16) | (0x971c >> 2),
  938. 0x00000000,
  939. (0x0e00 << 16) | (0x31068 >> 2),
  940. 0x00000000,
  941. (0x4e00 << 16) | (0x31068 >> 2),
  942. 0x00000000,
  943. (0x5e00 << 16) | (0x31068 >> 2),
  944. 0x00000000,
  945. (0x6e00 << 16) | (0x31068 >> 2),
  946. 0x00000000,
  947. (0x7e00 << 16) | (0x31068 >> 2),
  948. 0x00000000,
  949. (0x0e00 << 16) | (0xcd10 >> 2),
  950. 0x00000000,
  951. (0x0e00 << 16) | (0xcd14 >> 2),
  952. 0x00000000,
  953. (0x0e00 << 16) | (0x88b0 >> 2),
  954. 0x00000000,
  955. (0x0e00 << 16) | (0x88b4 >> 2),
  956. 0x00000000,
  957. (0x0e00 << 16) | (0x88b8 >> 2),
  958. 0x00000000,
  959. (0x0e00 << 16) | (0x88bc >> 2),
  960. 0x00000000,
  961. (0x0400 << 16) | (0x89c0 >> 2),
  962. 0x00000000,
  963. (0x0e00 << 16) | (0x88c4 >> 2),
  964. 0x00000000,
  965. (0x0e00 << 16) | (0x88c8 >> 2),
  966. 0x00000000,
  967. (0x0e00 << 16) | (0x88d0 >> 2),
  968. 0x00000000,
  969. (0x0e00 << 16) | (0x88d4 >> 2),
  970. 0x00000000,
  971. (0x0e00 << 16) | (0x88d8 >> 2),
  972. 0x00000000,
  973. (0x0e00 << 16) | (0x8980 >> 2),
  974. 0x00000000,
  975. (0x0e00 << 16) | (0x30938 >> 2),
  976. 0x00000000,
  977. (0x0e00 << 16) | (0x3093c >> 2),
  978. 0x00000000,
  979. (0x0e00 << 16) | (0x30940 >> 2),
  980. 0x00000000,
  981. (0x0e00 << 16) | (0x89a0 >> 2),
  982. 0x00000000,
  983. (0x0e00 << 16) | (0x30900 >> 2),
  984. 0x00000000,
  985. (0x0e00 << 16) | (0x30904 >> 2),
  986. 0x00000000,
  987. (0x0e00 << 16) | (0x89b4 >> 2),
  988. 0x00000000,
  989. (0x0e00 << 16) | (0x3e1fc >> 2),
  990. 0x00000000,
  991. (0x0e00 << 16) | (0x3c210 >> 2),
  992. 0x00000000,
  993. (0x0e00 << 16) | (0x3c214 >> 2),
  994. 0x00000000,
  995. (0x0e00 << 16) | (0x3c218 >> 2),
  996. 0x00000000,
  997. (0x0e00 << 16) | (0x8904 >> 2),
  998. 0x00000000,
  999. 0x5,
  1000. (0x0e00 << 16) | (0x8c28 >> 2),
  1001. (0x0e00 << 16) | (0x8c2c >> 2),
  1002. (0x0e00 << 16) | (0x8c30 >> 2),
  1003. (0x0e00 << 16) | (0x8c34 >> 2),
  1004. (0x0e00 << 16) | (0x9600 >> 2),
  1005. };
  1006. static const u32 bonaire_golden_spm_registers[] =
  1007. {
  1008. 0x30800, 0xe0ffffff, 0xe0000000
  1009. };
  1010. static const u32 bonaire_golden_common_registers[] =
  1011. {
  1012. 0xc770, 0xffffffff, 0x00000800,
  1013. 0xc774, 0xffffffff, 0x00000800,
  1014. 0xc798, 0xffffffff, 0x00007fbf,
  1015. 0xc79c, 0xffffffff, 0x00007faf
  1016. };
  1017. static const u32 bonaire_golden_registers[] =
  1018. {
  1019. 0x3354, 0x00000333, 0x00000333,
  1020. 0x3350, 0x000c0fc0, 0x00040200,
  1021. 0x9a10, 0x00010000, 0x00058208,
  1022. 0x3c000, 0xffff1fff, 0x00140000,
  1023. 0x3c200, 0xfdfc0fff, 0x00000100,
  1024. 0x3c234, 0x40000000, 0x40000200,
  1025. 0x9830, 0xffffffff, 0x00000000,
  1026. 0x9834, 0xf00fffff, 0x00000400,
  1027. 0x9838, 0x0002021c, 0x00020200,
  1028. 0xc78, 0x00000080, 0x00000000,
  1029. 0x5bb0, 0x000000f0, 0x00000070,
  1030. 0x5bc0, 0xf0311fff, 0x80300000,
  1031. 0x98f8, 0x73773777, 0x12010001,
  1032. 0x350c, 0x00810000, 0x408af000,
  1033. 0x7030, 0x31000111, 0x00000011,
  1034. 0x2f48, 0x73773777, 0x12010001,
  1035. 0x220c, 0x00007fb6, 0x0021a1b1,
  1036. 0x2210, 0x00007fb6, 0x002021b1,
  1037. 0x2180, 0x00007fb6, 0x00002191,
  1038. 0x2218, 0x00007fb6, 0x002121b1,
  1039. 0x221c, 0x00007fb6, 0x002021b1,
  1040. 0x21dc, 0x00007fb6, 0x00002191,
  1041. 0x21e0, 0x00007fb6, 0x00002191,
  1042. 0x3628, 0x0000003f, 0x0000000a,
  1043. 0x362c, 0x0000003f, 0x0000000a,
  1044. 0x2ae4, 0x00073ffe, 0x000022a2,
  1045. 0x240c, 0x000007ff, 0x00000000,
  1046. 0x8a14, 0xf000003f, 0x00000007,
  1047. 0x8bf0, 0x00002001, 0x00000001,
  1048. 0x8b24, 0xffffffff, 0x00ffffff,
  1049. 0x30a04, 0x0000ff0f, 0x00000000,
  1050. 0x28a4c, 0x07ffffff, 0x06000000,
  1051. 0x4d8, 0x00000fff, 0x00000100,
  1052. 0x3e78, 0x00000001, 0x00000002,
  1053. 0x9100, 0x03000000, 0x0362c688,
  1054. 0x8c00, 0x000000ff, 0x00000001,
  1055. 0xe40, 0x00001fff, 0x00001fff,
  1056. 0x9060, 0x0000007f, 0x00000020,
  1057. 0x9508, 0x00010000, 0x00010000,
  1058. 0xac14, 0x000003ff, 0x000000f3,
  1059. 0xac0c, 0xffffffff, 0x00001032
  1060. };
  1061. static const u32 bonaire_mgcg_cgcg_init[] =
  1062. {
  1063. 0xc420, 0xffffffff, 0xfffffffc,
  1064. 0x30800, 0xffffffff, 0xe0000000,
  1065. 0x3c2a0, 0xffffffff, 0x00000100,
  1066. 0x3c208, 0xffffffff, 0x00000100,
  1067. 0x3c2c0, 0xffffffff, 0xc0000100,
  1068. 0x3c2c8, 0xffffffff, 0xc0000100,
  1069. 0x3c2c4, 0xffffffff, 0xc0000100,
  1070. 0x55e4, 0xffffffff, 0x00600100,
  1071. 0x3c280, 0xffffffff, 0x00000100,
  1072. 0x3c214, 0xffffffff, 0x06000100,
  1073. 0x3c220, 0xffffffff, 0x00000100,
  1074. 0x3c218, 0xffffffff, 0x06000100,
  1075. 0x3c204, 0xffffffff, 0x00000100,
  1076. 0x3c2e0, 0xffffffff, 0x00000100,
  1077. 0x3c224, 0xffffffff, 0x00000100,
  1078. 0x3c200, 0xffffffff, 0x00000100,
  1079. 0x3c230, 0xffffffff, 0x00000100,
  1080. 0x3c234, 0xffffffff, 0x00000100,
  1081. 0x3c250, 0xffffffff, 0x00000100,
  1082. 0x3c254, 0xffffffff, 0x00000100,
  1083. 0x3c258, 0xffffffff, 0x00000100,
  1084. 0x3c25c, 0xffffffff, 0x00000100,
  1085. 0x3c260, 0xffffffff, 0x00000100,
  1086. 0x3c27c, 0xffffffff, 0x00000100,
  1087. 0x3c278, 0xffffffff, 0x00000100,
  1088. 0x3c210, 0xffffffff, 0x06000100,
  1089. 0x3c290, 0xffffffff, 0x00000100,
  1090. 0x3c274, 0xffffffff, 0x00000100,
  1091. 0x3c2b4, 0xffffffff, 0x00000100,
  1092. 0x3c2b0, 0xffffffff, 0x00000100,
  1093. 0x3c270, 0xffffffff, 0x00000100,
  1094. 0x30800, 0xffffffff, 0xe0000000,
  1095. 0x3c020, 0xffffffff, 0x00010000,
  1096. 0x3c024, 0xffffffff, 0x00030002,
  1097. 0x3c028, 0xffffffff, 0x00040007,
  1098. 0x3c02c, 0xffffffff, 0x00060005,
  1099. 0x3c030, 0xffffffff, 0x00090008,
  1100. 0x3c034, 0xffffffff, 0x00010000,
  1101. 0x3c038, 0xffffffff, 0x00030002,
  1102. 0x3c03c, 0xffffffff, 0x00040007,
  1103. 0x3c040, 0xffffffff, 0x00060005,
  1104. 0x3c044, 0xffffffff, 0x00090008,
  1105. 0x3c048, 0xffffffff, 0x00010000,
  1106. 0x3c04c, 0xffffffff, 0x00030002,
  1107. 0x3c050, 0xffffffff, 0x00040007,
  1108. 0x3c054, 0xffffffff, 0x00060005,
  1109. 0x3c058, 0xffffffff, 0x00090008,
  1110. 0x3c05c, 0xffffffff, 0x00010000,
  1111. 0x3c060, 0xffffffff, 0x00030002,
  1112. 0x3c064, 0xffffffff, 0x00040007,
  1113. 0x3c068, 0xffffffff, 0x00060005,
  1114. 0x3c06c, 0xffffffff, 0x00090008,
  1115. 0x3c070, 0xffffffff, 0x00010000,
  1116. 0x3c074, 0xffffffff, 0x00030002,
  1117. 0x3c078, 0xffffffff, 0x00040007,
  1118. 0x3c07c, 0xffffffff, 0x00060005,
  1119. 0x3c080, 0xffffffff, 0x00090008,
  1120. 0x3c084, 0xffffffff, 0x00010000,
  1121. 0x3c088, 0xffffffff, 0x00030002,
  1122. 0x3c08c, 0xffffffff, 0x00040007,
  1123. 0x3c090, 0xffffffff, 0x00060005,
  1124. 0x3c094, 0xffffffff, 0x00090008,
  1125. 0x3c098, 0xffffffff, 0x00010000,
  1126. 0x3c09c, 0xffffffff, 0x00030002,
  1127. 0x3c0a0, 0xffffffff, 0x00040007,
  1128. 0x3c0a4, 0xffffffff, 0x00060005,
  1129. 0x3c0a8, 0xffffffff, 0x00090008,
  1130. 0x3c000, 0xffffffff, 0x96e00200,
  1131. 0x8708, 0xffffffff, 0x00900100,
  1132. 0xc424, 0xffffffff, 0x0020003f,
  1133. 0x38, 0xffffffff, 0x0140001c,
  1134. 0x3c, 0x000f0000, 0x000f0000,
  1135. 0x220, 0xffffffff, 0xC060000C,
  1136. 0x224, 0xc0000fff, 0x00000100,
  1137. 0xf90, 0xffffffff, 0x00000100,
  1138. 0xf98, 0x00000101, 0x00000000,
  1139. 0x20a8, 0xffffffff, 0x00000104,
  1140. 0x55e4, 0xff000fff, 0x00000100,
  1141. 0x30cc, 0xc0000fff, 0x00000104,
  1142. 0xc1e4, 0x00000001, 0x00000001,
  1143. 0xd00c, 0xff000ff0, 0x00000100,
  1144. 0xd80c, 0xff000ff0, 0x00000100
  1145. };
  1146. static const u32 spectre_golden_spm_registers[] =
  1147. {
  1148. 0x30800, 0xe0ffffff, 0xe0000000
  1149. };
  1150. static const u32 spectre_golden_common_registers[] =
  1151. {
  1152. 0xc770, 0xffffffff, 0x00000800,
  1153. 0xc774, 0xffffffff, 0x00000800,
  1154. 0xc798, 0xffffffff, 0x00007fbf,
  1155. 0xc79c, 0xffffffff, 0x00007faf
  1156. };
  1157. static const u32 spectre_golden_registers[] =
  1158. {
  1159. 0x3c000, 0xffff1fff, 0x96940200,
  1160. 0x3c00c, 0xffff0001, 0xff000000,
  1161. 0x3c200, 0xfffc0fff, 0x00000100,
  1162. 0x6ed8, 0x00010101, 0x00010000,
  1163. 0x9834, 0xf00fffff, 0x00000400,
  1164. 0x9838, 0xfffffffc, 0x00020200,
  1165. 0x5bb0, 0x000000f0, 0x00000070,
  1166. 0x5bc0, 0xf0311fff, 0x80300000,
  1167. 0x98f8, 0x73773777, 0x12010001,
  1168. 0x9b7c, 0x00ff0000, 0x00fc0000,
  1169. 0x2f48, 0x73773777, 0x12010001,
  1170. 0x8a14, 0xf000003f, 0x00000007,
  1171. 0x8b24, 0xffffffff, 0x00ffffff,
  1172. 0x28350, 0x3f3f3fff, 0x00000082,
  1173. 0x28354, 0x0000003f, 0x00000000,
  1174. 0x3e78, 0x00000001, 0x00000002,
  1175. 0x913c, 0xffff03df, 0x00000004,
  1176. 0xc768, 0x00000008, 0x00000008,
  1177. 0x8c00, 0x000008ff, 0x00000800,
  1178. 0x9508, 0x00010000, 0x00010000,
  1179. 0xac0c, 0xffffffff, 0x54763210,
  1180. 0x214f8, 0x01ff01ff, 0x00000002,
  1181. 0x21498, 0x007ff800, 0x00200000,
  1182. 0x2015c, 0xffffffff, 0x00000f40,
  1183. 0x30934, 0xffffffff, 0x00000001
  1184. };
  1185. static const u32 spectre_mgcg_cgcg_init[] =
  1186. {
  1187. 0xc420, 0xffffffff, 0xfffffffc,
  1188. 0x30800, 0xffffffff, 0xe0000000,
  1189. 0x3c2a0, 0xffffffff, 0x00000100,
  1190. 0x3c208, 0xffffffff, 0x00000100,
  1191. 0x3c2c0, 0xffffffff, 0x00000100,
  1192. 0x3c2c8, 0xffffffff, 0x00000100,
  1193. 0x3c2c4, 0xffffffff, 0x00000100,
  1194. 0x55e4, 0xffffffff, 0x00600100,
  1195. 0x3c280, 0xffffffff, 0x00000100,
  1196. 0x3c214, 0xffffffff, 0x06000100,
  1197. 0x3c220, 0xffffffff, 0x00000100,
  1198. 0x3c218, 0xffffffff, 0x06000100,
  1199. 0x3c204, 0xffffffff, 0x00000100,
  1200. 0x3c2e0, 0xffffffff, 0x00000100,
  1201. 0x3c224, 0xffffffff, 0x00000100,
  1202. 0x3c200, 0xffffffff, 0x00000100,
  1203. 0x3c230, 0xffffffff, 0x00000100,
  1204. 0x3c234, 0xffffffff, 0x00000100,
  1205. 0x3c250, 0xffffffff, 0x00000100,
  1206. 0x3c254, 0xffffffff, 0x00000100,
  1207. 0x3c258, 0xffffffff, 0x00000100,
  1208. 0x3c25c, 0xffffffff, 0x00000100,
  1209. 0x3c260, 0xffffffff, 0x00000100,
  1210. 0x3c27c, 0xffffffff, 0x00000100,
  1211. 0x3c278, 0xffffffff, 0x00000100,
  1212. 0x3c210, 0xffffffff, 0x06000100,
  1213. 0x3c290, 0xffffffff, 0x00000100,
  1214. 0x3c274, 0xffffffff, 0x00000100,
  1215. 0x3c2b4, 0xffffffff, 0x00000100,
  1216. 0x3c2b0, 0xffffffff, 0x00000100,
  1217. 0x3c270, 0xffffffff, 0x00000100,
  1218. 0x30800, 0xffffffff, 0xe0000000,
  1219. 0x3c020, 0xffffffff, 0x00010000,
  1220. 0x3c024, 0xffffffff, 0x00030002,
  1221. 0x3c028, 0xffffffff, 0x00040007,
  1222. 0x3c02c, 0xffffffff, 0x00060005,
  1223. 0x3c030, 0xffffffff, 0x00090008,
  1224. 0x3c034, 0xffffffff, 0x00010000,
  1225. 0x3c038, 0xffffffff, 0x00030002,
  1226. 0x3c03c, 0xffffffff, 0x00040007,
  1227. 0x3c040, 0xffffffff, 0x00060005,
  1228. 0x3c044, 0xffffffff, 0x00090008,
  1229. 0x3c048, 0xffffffff, 0x00010000,
  1230. 0x3c04c, 0xffffffff, 0x00030002,
  1231. 0x3c050, 0xffffffff, 0x00040007,
  1232. 0x3c054, 0xffffffff, 0x00060005,
  1233. 0x3c058, 0xffffffff, 0x00090008,
  1234. 0x3c05c, 0xffffffff, 0x00010000,
  1235. 0x3c060, 0xffffffff, 0x00030002,
  1236. 0x3c064, 0xffffffff, 0x00040007,
  1237. 0x3c068, 0xffffffff, 0x00060005,
  1238. 0x3c06c, 0xffffffff, 0x00090008,
  1239. 0x3c070, 0xffffffff, 0x00010000,
  1240. 0x3c074, 0xffffffff, 0x00030002,
  1241. 0x3c078, 0xffffffff, 0x00040007,
  1242. 0x3c07c, 0xffffffff, 0x00060005,
  1243. 0x3c080, 0xffffffff, 0x00090008,
  1244. 0x3c084, 0xffffffff, 0x00010000,
  1245. 0x3c088, 0xffffffff, 0x00030002,
  1246. 0x3c08c, 0xffffffff, 0x00040007,
  1247. 0x3c090, 0xffffffff, 0x00060005,
  1248. 0x3c094, 0xffffffff, 0x00090008,
  1249. 0x3c098, 0xffffffff, 0x00010000,
  1250. 0x3c09c, 0xffffffff, 0x00030002,
  1251. 0x3c0a0, 0xffffffff, 0x00040007,
  1252. 0x3c0a4, 0xffffffff, 0x00060005,
  1253. 0x3c0a8, 0xffffffff, 0x00090008,
  1254. 0x3c0ac, 0xffffffff, 0x00010000,
  1255. 0x3c0b0, 0xffffffff, 0x00030002,
  1256. 0x3c0b4, 0xffffffff, 0x00040007,
  1257. 0x3c0b8, 0xffffffff, 0x00060005,
  1258. 0x3c0bc, 0xffffffff, 0x00090008,
  1259. 0x3c000, 0xffffffff, 0x96e00200,
  1260. 0x8708, 0xffffffff, 0x00900100,
  1261. 0xc424, 0xffffffff, 0x0020003f,
  1262. 0x38, 0xffffffff, 0x0140001c,
  1263. 0x3c, 0x000f0000, 0x000f0000,
  1264. 0x220, 0xffffffff, 0xC060000C,
  1265. 0x224, 0xc0000fff, 0x00000100,
  1266. 0xf90, 0xffffffff, 0x00000100,
  1267. 0xf98, 0x00000101, 0x00000000,
  1268. 0x20a8, 0xffffffff, 0x00000104,
  1269. 0x55e4, 0xff000fff, 0x00000100,
  1270. 0x30cc, 0xc0000fff, 0x00000104,
  1271. 0xc1e4, 0x00000001, 0x00000001,
  1272. 0xd00c, 0xff000ff0, 0x00000100,
  1273. 0xd80c, 0xff000ff0, 0x00000100
  1274. };
  1275. static const u32 kalindi_golden_spm_registers[] =
  1276. {
  1277. 0x30800, 0xe0ffffff, 0xe0000000
  1278. };
  1279. static const u32 kalindi_golden_common_registers[] =
  1280. {
  1281. 0xc770, 0xffffffff, 0x00000800,
  1282. 0xc774, 0xffffffff, 0x00000800,
  1283. 0xc798, 0xffffffff, 0x00007fbf,
  1284. 0xc79c, 0xffffffff, 0x00007faf
  1285. };
  1286. static const u32 kalindi_golden_registers[] =
  1287. {
  1288. 0x3c000, 0xffffdfff, 0x6e944040,
  1289. 0x55e4, 0xff607fff, 0xfc000100,
  1290. 0x3c220, 0xff000fff, 0x00000100,
  1291. 0x3c224, 0xff000fff, 0x00000100,
  1292. 0x3c200, 0xfffc0fff, 0x00000100,
  1293. 0x6ed8, 0x00010101, 0x00010000,
  1294. 0x9830, 0xffffffff, 0x00000000,
  1295. 0x9834, 0xf00fffff, 0x00000400,
  1296. 0x5bb0, 0x000000f0, 0x00000070,
  1297. 0x5bc0, 0xf0311fff, 0x80300000,
  1298. 0x98f8, 0x73773777, 0x12010001,
  1299. 0x98fc, 0xffffffff, 0x00000010,
  1300. 0x9b7c, 0x00ff0000, 0x00fc0000,
  1301. 0x8030, 0x00001f0f, 0x0000100a,
  1302. 0x2f48, 0x73773777, 0x12010001,
  1303. 0x2408, 0x000fffff, 0x000c007f,
  1304. 0x8a14, 0xf000003f, 0x00000007,
  1305. 0x8b24, 0x3fff3fff, 0x00ffcfff,
  1306. 0x30a04, 0x0000ff0f, 0x00000000,
  1307. 0x28a4c, 0x07ffffff, 0x06000000,
  1308. 0x4d8, 0x00000fff, 0x00000100,
  1309. 0x3e78, 0x00000001, 0x00000002,
  1310. 0xc768, 0x00000008, 0x00000008,
  1311. 0x8c00, 0x000000ff, 0x00000003,
  1312. 0x214f8, 0x01ff01ff, 0x00000002,
  1313. 0x21498, 0x007ff800, 0x00200000,
  1314. 0x2015c, 0xffffffff, 0x00000f40,
  1315. 0x88c4, 0x001f3ae3, 0x00000082,
  1316. 0x88d4, 0x0000001f, 0x00000010,
  1317. 0x30934, 0xffffffff, 0x00000000
  1318. };
  1319. static const u32 kalindi_mgcg_cgcg_init[] =
  1320. {
  1321. 0xc420, 0xffffffff, 0xfffffffc,
  1322. 0x30800, 0xffffffff, 0xe0000000,
  1323. 0x3c2a0, 0xffffffff, 0x00000100,
  1324. 0x3c208, 0xffffffff, 0x00000100,
  1325. 0x3c2c0, 0xffffffff, 0x00000100,
  1326. 0x3c2c8, 0xffffffff, 0x00000100,
  1327. 0x3c2c4, 0xffffffff, 0x00000100,
  1328. 0x55e4, 0xffffffff, 0x00600100,
  1329. 0x3c280, 0xffffffff, 0x00000100,
  1330. 0x3c214, 0xffffffff, 0x06000100,
  1331. 0x3c220, 0xffffffff, 0x00000100,
  1332. 0x3c218, 0xffffffff, 0x06000100,
  1333. 0x3c204, 0xffffffff, 0x00000100,
  1334. 0x3c2e0, 0xffffffff, 0x00000100,
  1335. 0x3c224, 0xffffffff, 0x00000100,
  1336. 0x3c200, 0xffffffff, 0x00000100,
  1337. 0x3c230, 0xffffffff, 0x00000100,
  1338. 0x3c234, 0xffffffff, 0x00000100,
  1339. 0x3c250, 0xffffffff, 0x00000100,
  1340. 0x3c254, 0xffffffff, 0x00000100,
  1341. 0x3c258, 0xffffffff, 0x00000100,
  1342. 0x3c25c, 0xffffffff, 0x00000100,
  1343. 0x3c260, 0xffffffff, 0x00000100,
  1344. 0x3c27c, 0xffffffff, 0x00000100,
  1345. 0x3c278, 0xffffffff, 0x00000100,
  1346. 0x3c210, 0xffffffff, 0x06000100,
  1347. 0x3c290, 0xffffffff, 0x00000100,
  1348. 0x3c274, 0xffffffff, 0x00000100,
  1349. 0x3c2b4, 0xffffffff, 0x00000100,
  1350. 0x3c2b0, 0xffffffff, 0x00000100,
  1351. 0x3c270, 0xffffffff, 0x00000100,
  1352. 0x30800, 0xffffffff, 0xe0000000,
  1353. 0x3c020, 0xffffffff, 0x00010000,
  1354. 0x3c024, 0xffffffff, 0x00030002,
  1355. 0x3c028, 0xffffffff, 0x00040007,
  1356. 0x3c02c, 0xffffffff, 0x00060005,
  1357. 0x3c030, 0xffffffff, 0x00090008,
  1358. 0x3c034, 0xffffffff, 0x00010000,
  1359. 0x3c038, 0xffffffff, 0x00030002,
  1360. 0x3c03c, 0xffffffff, 0x00040007,
  1361. 0x3c040, 0xffffffff, 0x00060005,
  1362. 0x3c044, 0xffffffff, 0x00090008,
  1363. 0x3c000, 0xffffffff, 0x96e00200,
  1364. 0x8708, 0xffffffff, 0x00900100,
  1365. 0xc424, 0xffffffff, 0x0020003f,
  1366. 0x38, 0xffffffff, 0x0140001c,
  1367. 0x3c, 0x000f0000, 0x000f0000,
  1368. 0x220, 0xffffffff, 0xC060000C,
  1369. 0x224, 0xc0000fff, 0x00000100,
  1370. 0x20a8, 0xffffffff, 0x00000104,
  1371. 0x55e4, 0xff000fff, 0x00000100,
  1372. 0x30cc, 0xc0000fff, 0x00000104,
  1373. 0xc1e4, 0x00000001, 0x00000001,
  1374. 0xd00c, 0xff000ff0, 0x00000100,
  1375. 0xd80c, 0xff000ff0, 0x00000100
  1376. };
  1377. static const u32 hawaii_golden_spm_registers[] =
  1378. {
  1379. 0x30800, 0xe0ffffff, 0xe0000000
  1380. };
  1381. static const u32 hawaii_golden_common_registers[] =
  1382. {
  1383. 0x30800, 0xffffffff, 0xe0000000,
  1384. 0x28350, 0xffffffff, 0x3a00161a,
  1385. 0x28354, 0xffffffff, 0x0000002e,
  1386. 0x9a10, 0xffffffff, 0x00018208,
  1387. 0x98f8, 0xffffffff, 0x12011003
  1388. };
  1389. static const u32 hawaii_golden_registers[] =
  1390. {
  1391. 0x3354, 0x00000333, 0x00000333,
  1392. 0x9a10, 0x00010000, 0x00058208,
  1393. 0x9830, 0xffffffff, 0x00000000,
  1394. 0x9834, 0xf00fffff, 0x00000400,
  1395. 0x9838, 0x0002021c, 0x00020200,
  1396. 0xc78, 0x00000080, 0x00000000,
  1397. 0x5bb0, 0x000000f0, 0x00000070,
  1398. 0x5bc0, 0xf0311fff, 0x80300000,
  1399. 0x350c, 0x00810000, 0x408af000,
  1400. 0x7030, 0x31000111, 0x00000011,
  1401. 0x2f48, 0x73773777, 0x12010001,
  1402. 0x2120, 0x0000007f, 0x0000001b,
  1403. 0x21dc, 0x00007fb6, 0x00002191,
  1404. 0x3628, 0x0000003f, 0x0000000a,
  1405. 0x362c, 0x0000003f, 0x0000000a,
  1406. 0x2ae4, 0x00073ffe, 0x000022a2,
  1407. 0x240c, 0x000007ff, 0x00000000,
  1408. 0x8bf0, 0x00002001, 0x00000001,
  1409. 0x8b24, 0xffffffff, 0x00ffffff,
  1410. 0x30a04, 0x0000ff0f, 0x00000000,
  1411. 0x28a4c, 0x07ffffff, 0x06000000,
  1412. 0x3e78, 0x00000001, 0x00000002,
  1413. 0xc768, 0x00000008, 0x00000008,
  1414. 0xc770, 0x00000f00, 0x00000800,
  1415. 0xc774, 0x00000f00, 0x00000800,
  1416. 0xc798, 0x00ffffff, 0x00ff7fbf,
  1417. 0xc79c, 0x00ffffff, 0x00ff7faf,
  1418. 0x8c00, 0x000000ff, 0x00000800,
  1419. 0xe40, 0x00001fff, 0x00001fff,
  1420. 0x9060, 0x0000007f, 0x00000020,
  1421. 0x9508, 0x00010000, 0x00010000,
  1422. 0xae00, 0x00100000, 0x000ff07c,
  1423. 0xac14, 0x000003ff, 0x0000000f,
  1424. 0xac10, 0xffffffff, 0x7564fdec,
  1425. 0xac0c, 0xffffffff, 0x3120b9a8,
  1426. 0xac08, 0x20000000, 0x0f9c0000
  1427. };
  1428. static const u32 hawaii_mgcg_cgcg_init[] =
  1429. {
  1430. 0xc420, 0xffffffff, 0xfffffffd,
  1431. 0x30800, 0xffffffff, 0xe0000000,
  1432. 0x3c2a0, 0xffffffff, 0x00000100,
  1433. 0x3c208, 0xffffffff, 0x00000100,
  1434. 0x3c2c0, 0xffffffff, 0x00000100,
  1435. 0x3c2c8, 0xffffffff, 0x00000100,
  1436. 0x3c2c4, 0xffffffff, 0x00000100,
  1437. 0x55e4, 0xffffffff, 0x00200100,
  1438. 0x3c280, 0xffffffff, 0x00000100,
  1439. 0x3c214, 0xffffffff, 0x06000100,
  1440. 0x3c220, 0xffffffff, 0x00000100,
  1441. 0x3c218, 0xffffffff, 0x06000100,
  1442. 0x3c204, 0xffffffff, 0x00000100,
  1443. 0x3c2e0, 0xffffffff, 0x00000100,
  1444. 0x3c224, 0xffffffff, 0x00000100,
  1445. 0x3c200, 0xffffffff, 0x00000100,
  1446. 0x3c230, 0xffffffff, 0x00000100,
  1447. 0x3c234, 0xffffffff, 0x00000100,
  1448. 0x3c250, 0xffffffff, 0x00000100,
  1449. 0x3c254, 0xffffffff, 0x00000100,
  1450. 0x3c258, 0xffffffff, 0x00000100,
  1451. 0x3c25c, 0xffffffff, 0x00000100,
  1452. 0x3c260, 0xffffffff, 0x00000100,
  1453. 0x3c27c, 0xffffffff, 0x00000100,
  1454. 0x3c278, 0xffffffff, 0x00000100,
  1455. 0x3c210, 0xffffffff, 0x06000100,
  1456. 0x3c290, 0xffffffff, 0x00000100,
  1457. 0x3c274, 0xffffffff, 0x00000100,
  1458. 0x3c2b4, 0xffffffff, 0x00000100,
  1459. 0x3c2b0, 0xffffffff, 0x00000100,
  1460. 0x3c270, 0xffffffff, 0x00000100,
  1461. 0x30800, 0xffffffff, 0xe0000000,
  1462. 0x3c020, 0xffffffff, 0x00010000,
  1463. 0x3c024, 0xffffffff, 0x00030002,
  1464. 0x3c028, 0xffffffff, 0x00040007,
  1465. 0x3c02c, 0xffffffff, 0x00060005,
  1466. 0x3c030, 0xffffffff, 0x00090008,
  1467. 0x3c034, 0xffffffff, 0x00010000,
  1468. 0x3c038, 0xffffffff, 0x00030002,
  1469. 0x3c03c, 0xffffffff, 0x00040007,
  1470. 0x3c040, 0xffffffff, 0x00060005,
  1471. 0x3c044, 0xffffffff, 0x00090008,
  1472. 0x3c048, 0xffffffff, 0x00010000,
  1473. 0x3c04c, 0xffffffff, 0x00030002,
  1474. 0x3c050, 0xffffffff, 0x00040007,
  1475. 0x3c054, 0xffffffff, 0x00060005,
  1476. 0x3c058, 0xffffffff, 0x00090008,
  1477. 0x3c05c, 0xffffffff, 0x00010000,
  1478. 0x3c060, 0xffffffff, 0x00030002,
  1479. 0x3c064, 0xffffffff, 0x00040007,
  1480. 0x3c068, 0xffffffff, 0x00060005,
  1481. 0x3c06c, 0xffffffff, 0x00090008,
  1482. 0x3c070, 0xffffffff, 0x00010000,
  1483. 0x3c074, 0xffffffff, 0x00030002,
  1484. 0x3c078, 0xffffffff, 0x00040007,
  1485. 0x3c07c, 0xffffffff, 0x00060005,
  1486. 0x3c080, 0xffffffff, 0x00090008,
  1487. 0x3c084, 0xffffffff, 0x00010000,
  1488. 0x3c088, 0xffffffff, 0x00030002,
  1489. 0x3c08c, 0xffffffff, 0x00040007,
  1490. 0x3c090, 0xffffffff, 0x00060005,
  1491. 0x3c094, 0xffffffff, 0x00090008,
  1492. 0x3c098, 0xffffffff, 0x00010000,
  1493. 0x3c09c, 0xffffffff, 0x00030002,
  1494. 0x3c0a0, 0xffffffff, 0x00040007,
  1495. 0x3c0a4, 0xffffffff, 0x00060005,
  1496. 0x3c0a8, 0xffffffff, 0x00090008,
  1497. 0x3c0ac, 0xffffffff, 0x00010000,
  1498. 0x3c0b0, 0xffffffff, 0x00030002,
  1499. 0x3c0b4, 0xffffffff, 0x00040007,
  1500. 0x3c0b8, 0xffffffff, 0x00060005,
  1501. 0x3c0bc, 0xffffffff, 0x00090008,
  1502. 0x3c0c0, 0xffffffff, 0x00010000,
  1503. 0x3c0c4, 0xffffffff, 0x00030002,
  1504. 0x3c0c8, 0xffffffff, 0x00040007,
  1505. 0x3c0cc, 0xffffffff, 0x00060005,
  1506. 0x3c0d0, 0xffffffff, 0x00090008,
  1507. 0x3c0d4, 0xffffffff, 0x00010000,
  1508. 0x3c0d8, 0xffffffff, 0x00030002,
  1509. 0x3c0dc, 0xffffffff, 0x00040007,
  1510. 0x3c0e0, 0xffffffff, 0x00060005,
  1511. 0x3c0e4, 0xffffffff, 0x00090008,
  1512. 0x3c0e8, 0xffffffff, 0x00010000,
  1513. 0x3c0ec, 0xffffffff, 0x00030002,
  1514. 0x3c0f0, 0xffffffff, 0x00040007,
  1515. 0x3c0f4, 0xffffffff, 0x00060005,
  1516. 0x3c0f8, 0xffffffff, 0x00090008,
  1517. 0xc318, 0xffffffff, 0x00020200,
  1518. 0x3350, 0xffffffff, 0x00000200,
  1519. 0x15c0, 0xffffffff, 0x00000400,
  1520. 0x55e8, 0xffffffff, 0x00000000,
  1521. 0x2f50, 0xffffffff, 0x00000902,
  1522. 0x3c000, 0xffffffff, 0x96940200,
  1523. 0x8708, 0xffffffff, 0x00900100,
  1524. 0xc424, 0xffffffff, 0x0020003f,
  1525. 0x38, 0xffffffff, 0x0140001c,
  1526. 0x3c, 0x000f0000, 0x000f0000,
  1527. 0x220, 0xffffffff, 0xc060000c,
  1528. 0x224, 0xc0000fff, 0x00000100,
  1529. 0xf90, 0xffffffff, 0x00000100,
  1530. 0xf98, 0x00000101, 0x00000000,
  1531. 0x20a8, 0xffffffff, 0x00000104,
  1532. 0x55e4, 0xff000fff, 0x00000100,
  1533. 0x30cc, 0xc0000fff, 0x00000104,
  1534. 0xc1e4, 0x00000001, 0x00000001,
  1535. 0xd00c, 0xff000ff0, 0x00000100,
  1536. 0xd80c, 0xff000ff0, 0x00000100
  1537. };
  1538. static const u32 godavari_golden_registers[] =
  1539. {
  1540. 0x55e4, 0xff607fff, 0xfc000100,
  1541. 0x6ed8, 0x00010101, 0x00010000,
  1542. 0x9830, 0xffffffff, 0x00000000,
  1543. 0x98302, 0xf00fffff, 0x00000400,
  1544. 0x6130, 0xffffffff, 0x00010000,
  1545. 0x5bb0, 0x000000f0, 0x00000070,
  1546. 0x5bc0, 0xf0311fff, 0x80300000,
  1547. 0x98f8, 0x73773777, 0x12010001,
  1548. 0x98fc, 0xffffffff, 0x00000010,
  1549. 0x8030, 0x00001f0f, 0x0000100a,
  1550. 0x2f48, 0x73773777, 0x12010001,
  1551. 0x2408, 0x000fffff, 0x000c007f,
  1552. 0x8a14, 0xf000003f, 0x00000007,
  1553. 0x8b24, 0xffffffff, 0x00ff0fff,
  1554. 0x30a04, 0x0000ff0f, 0x00000000,
  1555. 0x28a4c, 0x07ffffff, 0x06000000,
  1556. 0x4d8, 0x00000fff, 0x00000100,
  1557. 0xd014, 0x00010000, 0x00810001,
  1558. 0xd814, 0x00010000, 0x00810001,
  1559. 0x3e78, 0x00000001, 0x00000002,
  1560. 0xc768, 0x00000008, 0x00000008,
  1561. 0xc770, 0x00000f00, 0x00000800,
  1562. 0xc774, 0x00000f00, 0x00000800,
  1563. 0xc798, 0x00ffffff, 0x00ff7fbf,
  1564. 0xc79c, 0x00ffffff, 0x00ff7faf,
  1565. 0x8c00, 0x000000ff, 0x00000001,
  1566. 0x214f8, 0x01ff01ff, 0x00000002,
  1567. 0x21498, 0x007ff800, 0x00200000,
  1568. 0x2015c, 0xffffffff, 0x00000f40,
  1569. 0x88c4, 0x001f3ae3, 0x00000082,
  1570. 0x88d4, 0x0000001f, 0x00000010,
  1571. 0x30934, 0xffffffff, 0x00000000
  1572. };
  1573. static void cik_init_golden_registers(struct radeon_device *rdev)
  1574. {
  1575. /* Some of the registers might be dependent on GRBM_GFX_INDEX */
  1576. mutex_lock(&rdev->grbm_idx_mutex);
  1577. switch (rdev->family) {
  1578. case CHIP_BONAIRE:
  1579. radeon_program_register_sequence(rdev,
  1580. bonaire_mgcg_cgcg_init,
  1581. (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
  1582. radeon_program_register_sequence(rdev,
  1583. bonaire_golden_registers,
  1584. (const u32)ARRAY_SIZE(bonaire_golden_registers));
  1585. radeon_program_register_sequence(rdev,
  1586. bonaire_golden_common_registers,
  1587. (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
  1588. radeon_program_register_sequence(rdev,
  1589. bonaire_golden_spm_registers,
  1590. (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
  1591. break;
  1592. case CHIP_KABINI:
  1593. radeon_program_register_sequence(rdev,
  1594. kalindi_mgcg_cgcg_init,
  1595. (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
  1596. radeon_program_register_sequence(rdev,
  1597. kalindi_golden_registers,
  1598. (const u32)ARRAY_SIZE(kalindi_golden_registers));
  1599. radeon_program_register_sequence(rdev,
  1600. kalindi_golden_common_registers,
  1601. (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
  1602. radeon_program_register_sequence(rdev,
  1603. kalindi_golden_spm_registers,
  1604. (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
  1605. break;
  1606. case CHIP_MULLINS:
  1607. radeon_program_register_sequence(rdev,
  1608. kalindi_mgcg_cgcg_init,
  1609. (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
  1610. radeon_program_register_sequence(rdev,
  1611. godavari_golden_registers,
  1612. (const u32)ARRAY_SIZE(godavari_golden_registers));
  1613. radeon_program_register_sequence(rdev,
  1614. kalindi_golden_common_registers,
  1615. (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
  1616. radeon_program_register_sequence(rdev,
  1617. kalindi_golden_spm_registers,
  1618. (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
  1619. break;
  1620. case CHIP_KAVERI:
  1621. radeon_program_register_sequence(rdev,
  1622. spectre_mgcg_cgcg_init,
  1623. (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
  1624. radeon_program_register_sequence(rdev,
  1625. spectre_golden_registers,
  1626. (const u32)ARRAY_SIZE(spectre_golden_registers));
  1627. radeon_program_register_sequence(rdev,
  1628. spectre_golden_common_registers,
  1629. (const u32)ARRAY_SIZE(spectre_golden_common_registers));
  1630. radeon_program_register_sequence(rdev,
  1631. spectre_golden_spm_registers,
  1632. (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
  1633. break;
  1634. case CHIP_HAWAII:
  1635. radeon_program_register_sequence(rdev,
  1636. hawaii_mgcg_cgcg_init,
  1637. (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
  1638. radeon_program_register_sequence(rdev,
  1639. hawaii_golden_registers,
  1640. (const u32)ARRAY_SIZE(hawaii_golden_registers));
  1641. radeon_program_register_sequence(rdev,
  1642. hawaii_golden_common_registers,
  1643. (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
  1644. radeon_program_register_sequence(rdev,
  1645. hawaii_golden_spm_registers,
  1646. (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
  1647. break;
  1648. default:
  1649. break;
  1650. }
  1651. mutex_unlock(&rdev->grbm_idx_mutex);
  1652. }
  1653. /**
  1654. * cik_get_xclk - get the xclk
  1655. *
  1656. * @rdev: radeon_device pointer
  1657. *
  1658. * Returns the reference clock used by the gfx engine
  1659. * (CIK).
  1660. */
  1661. u32 cik_get_xclk(struct radeon_device *rdev)
  1662. {
  1663. u32 reference_clock = rdev->clock.spll.reference_freq;
  1664. if (rdev->flags & RADEON_IS_IGP) {
  1665. if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
  1666. return reference_clock / 2;
  1667. } else {
  1668. if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
  1669. return reference_clock / 4;
  1670. }
  1671. return reference_clock;
  1672. }
  1673. /**
  1674. * cik_mm_rdoorbell - read a doorbell dword
  1675. *
  1676. * @rdev: radeon_device pointer
  1677. * @index: doorbell index
  1678. *
  1679. * Returns the value in the doorbell aperture at the
  1680. * requested doorbell index (CIK).
  1681. */
  1682. u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
  1683. {
  1684. if (index < rdev->doorbell.num_doorbells) {
  1685. return readl(rdev->doorbell.ptr + index);
  1686. } else {
  1687. DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
  1688. return 0;
  1689. }
  1690. }
  1691. /**
  1692. * cik_mm_wdoorbell - write a doorbell dword
  1693. *
  1694. * @rdev: radeon_device pointer
  1695. * @index: doorbell index
  1696. * @v: value to write
  1697. *
  1698. * Writes @v to the doorbell aperture at the
  1699. * requested doorbell index (CIK).
  1700. */
  1701. void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
  1702. {
  1703. if (index < rdev->doorbell.num_doorbells) {
  1704. writel(v, rdev->doorbell.ptr + index);
  1705. } else {
  1706. DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
  1707. }
  1708. }
  1709. #define BONAIRE_IO_MC_REGS_SIZE 36
  1710. static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
  1711. {
  1712. {0x00000070, 0x04400000},
  1713. {0x00000071, 0x80c01803},
  1714. {0x00000072, 0x00004004},
  1715. {0x00000073, 0x00000100},
  1716. {0x00000074, 0x00ff0000},
  1717. {0x00000075, 0x34000000},
  1718. {0x00000076, 0x08000014},
  1719. {0x00000077, 0x00cc08ec},
  1720. {0x00000078, 0x00000400},
  1721. {0x00000079, 0x00000000},
  1722. {0x0000007a, 0x04090000},
  1723. {0x0000007c, 0x00000000},
  1724. {0x0000007e, 0x4408a8e8},
  1725. {0x0000007f, 0x00000304},
  1726. {0x00000080, 0x00000000},
  1727. {0x00000082, 0x00000001},
  1728. {0x00000083, 0x00000002},
  1729. {0x00000084, 0xf3e4f400},
  1730. {0x00000085, 0x052024e3},
  1731. {0x00000087, 0x00000000},
  1732. {0x00000088, 0x01000000},
  1733. {0x0000008a, 0x1c0a0000},
  1734. {0x0000008b, 0xff010000},
  1735. {0x0000008d, 0xffffefff},
  1736. {0x0000008e, 0xfff3efff},
  1737. {0x0000008f, 0xfff3efbf},
  1738. {0x00000092, 0xf7ffffff},
  1739. {0x00000093, 0xffffff7f},
  1740. {0x00000095, 0x00101101},
  1741. {0x00000096, 0x00000fff},
  1742. {0x00000097, 0x00116fff},
  1743. {0x00000098, 0x60010000},
  1744. {0x00000099, 0x10010000},
  1745. {0x0000009a, 0x00006000},
  1746. {0x0000009b, 0x00001000},
  1747. {0x0000009f, 0x00b48000}
  1748. };
  1749. #define HAWAII_IO_MC_REGS_SIZE 22
  1750. static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
  1751. {
  1752. {0x0000007d, 0x40000000},
  1753. {0x0000007e, 0x40180304},
  1754. {0x0000007f, 0x0000ff00},
  1755. {0x00000081, 0x00000000},
  1756. {0x00000083, 0x00000800},
  1757. {0x00000086, 0x00000000},
  1758. {0x00000087, 0x00000100},
  1759. {0x00000088, 0x00020100},
  1760. {0x00000089, 0x00000000},
  1761. {0x0000008b, 0x00040000},
  1762. {0x0000008c, 0x00000100},
  1763. {0x0000008e, 0xff010000},
  1764. {0x00000090, 0xffffefff},
  1765. {0x00000091, 0xfff3efff},
  1766. {0x00000092, 0xfff3efbf},
  1767. {0x00000093, 0xf7ffffff},
  1768. {0x00000094, 0xffffff7f},
  1769. {0x00000095, 0x00000fff},
  1770. {0x00000096, 0x00116fff},
  1771. {0x00000097, 0x60010000},
  1772. {0x00000098, 0x10010000},
  1773. {0x0000009f, 0x00c79000}
  1774. };
  1775. /**
  1776. * cik_srbm_select - select specific register instances
  1777. *
  1778. * @rdev: radeon_device pointer
  1779. * @me: selected ME (micro engine)
  1780. * @pipe: pipe
  1781. * @queue: queue
  1782. * @vmid: VMID
  1783. *
  1784. * Switches the currently active registers instances. Some
  1785. * registers are instanced per VMID, others are instanced per
  1786. * me/pipe/queue combination.
  1787. */
  1788. static void cik_srbm_select(struct radeon_device *rdev,
  1789. u32 me, u32 pipe, u32 queue, u32 vmid)
  1790. {
  1791. u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
  1792. MEID(me & 0x3) |
  1793. VMID(vmid & 0xf) |
  1794. QUEUEID(queue & 0x7));
  1795. WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
  1796. }
  1797. /* ucode loading */
  1798. /**
  1799. * ci_mc_load_microcode - load MC ucode into the hw
  1800. *
  1801. * @rdev: radeon_device pointer
  1802. *
  1803. * Load the GDDR MC ucode into the hw (CIK).
  1804. * Returns 0 on success, error on failure.
  1805. */
  1806. int ci_mc_load_microcode(struct radeon_device *rdev)
  1807. {
  1808. const __be32 *fw_data = NULL;
  1809. const __le32 *new_fw_data = NULL;
  1810. u32 running, blackout = 0, tmp;
  1811. u32 *io_mc_regs = NULL;
  1812. const __le32 *new_io_mc_regs = NULL;
  1813. int i, regs_size, ucode_size;
  1814. if (!rdev->mc_fw)
  1815. return -EINVAL;
  1816. if (rdev->new_fw) {
  1817. const struct mc_firmware_header_v1_0 *hdr =
  1818. (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
  1819. radeon_ucode_print_mc_hdr(&hdr->header);
  1820. regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
  1821. new_io_mc_regs = (const __le32 *)
  1822. (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
  1823. ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
  1824. new_fw_data = (const __le32 *)
  1825. (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  1826. } else {
  1827. ucode_size = rdev->mc_fw->size / 4;
  1828. switch (rdev->family) {
  1829. case CHIP_BONAIRE:
  1830. io_mc_regs = (u32 *)&bonaire_io_mc_regs;
  1831. regs_size = BONAIRE_IO_MC_REGS_SIZE;
  1832. break;
  1833. case CHIP_HAWAII:
  1834. io_mc_regs = (u32 *)&hawaii_io_mc_regs;
  1835. regs_size = HAWAII_IO_MC_REGS_SIZE;
  1836. break;
  1837. default:
  1838. return -EINVAL;
  1839. }
  1840. fw_data = (const __be32 *)rdev->mc_fw->data;
  1841. }
  1842. running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
  1843. if (running == 0) {
  1844. if (running) {
  1845. blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
  1846. WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
  1847. }
  1848. /* reset the engine and set to writable */
  1849. WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
  1850. WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
  1851. /* load mc io regs */
  1852. for (i = 0; i < regs_size; i++) {
  1853. if (rdev->new_fw) {
  1854. WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
  1855. WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
  1856. } else {
  1857. WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
  1858. WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
  1859. }
  1860. }
  1861. tmp = RREG32(MC_SEQ_MISC0);
  1862. if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
  1863. WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
  1864. WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
  1865. WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
  1866. WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
  1867. }
  1868. /* load the MC ucode */
  1869. for (i = 0; i < ucode_size; i++) {
  1870. if (rdev->new_fw)
  1871. WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
  1872. else
  1873. WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
  1874. }
  1875. /* put the engine back into the active state */
  1876. WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
  1877. WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
  1878. WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
  1879. /* wait for training to complete */
  1880. for (i = 0; i < rdev->usec_timeout; i++) {
  1881. if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
  1882. break;
  1883. udelay(1);
  1884. }
  1885. for (i = 0; i < rdev->usec_timeout; i++) {
  1886. if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
  1887. break;
  1888. udelay(1);
  1889. }
  1890. if (running)
  1891. WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
  1892. }
  1893. return 0;
  1894. }
  1895. /**
  1896. * cik_init_microcode - load ucode images from disk
  1897. *
  1898. * @rdev: radeon_device pointer
  1899. *
  1900. * Use the firmware interface to load the ucode images into
  1901. * the driver (not loaded into hw).
  1902. * Returns 0 on success, error on failure.
  1903. */
  1904. static int cik_init_microcode(struct radeon_device *rdev)
  1905. {
  1906. const char *chip_name;
  1907. const char *new_chip_name;
  1908. size_t pfp_req_size, me_req_size, ce_req_size,
  1909. mec_req_size, rlc_req_size, mc_req_size = 0,
  1910. sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
  1911. char fw_name[30];
  1912. int new_fw = 0;
  1913. int err;
  1914. int num_fw;
  1915. DRM_DEBUG("\n");
  1916. switch (rdev->family) {
  1917. case CHIP_BONAIRE:
  1918. chip_name = "BONAIRE";
  1919. new_chip_name = "bonaire";
  1920. pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
  1921. me_req_size = CIK_ME_UCODE_SIZE * 4;
  1922. ce_req_size = CIK_CE_UCODE_SIZE * 4;
  1923. mec_req_size = CIK_MEC_UCODE_SIZE * 4;
  1924. rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
  1925. mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
  1926. mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
  1927. sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
  1928. smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
  1929. num_fw = 8;
  1930. break;
  1931. case CHIP_HAWAII:
  1932. chip_name = "HAWAII";
  1933. new_chip_name = "hawaii";
  1934. pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
  1935. me_req_size = CIK_ME_UCODE_SIZE * 4;
  1936. ce_req_size = CIK_CE_UCODE_SIZE * 4;
  1937. mec_req_size = CIK_MEC_UCODE_SIZE * 4;
  1938. rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
  1939. mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
  1940. mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
  1941. sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
  1942. smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
  1943. num_fw = 8;
  1944. break;
  1945. case CHIP_KAVERI:
  1946. chip_name = "KAVERI";
  1947. new_chip_name = "kaveri";
  1948. pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
  1949. me_req_size = CIK_ME_UCODE_SIZE * 4;
  1950. ce_req_size = CIK_CE_UCODE_SIZE * 4;
  1951. mec_req_size = CIK_MEC_UCODE_SIZE * 4;
  1952. rlc_req_size = KV_RLC_UCODE_SIZE * 4;
  1953. sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
  1954. num_fw = 7;
  1955. break;
  1956. case CHIP_KABINI:
  1957. chip_name = "KABINI";
  1958. new_chip_name = "kabini";
  1959. pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
  1960. me_req_size = CIK_ME_UCODE_SIZE * 4;
  1961. ce_req_size = CIK_CE_UCODE_SIZE * 4;
  1962. mec_req_size = CIK_MEC_UCODE_SIZE * 4;
  1963. rlc_req_size = KB_RLC_UCODE_SIZE * 4;
  1964. sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
  1965. num_fw = 6;
  1966. break;
  1967. case CHIP_MULLINS:
  1968. chip_name = "MULLINS";
  1969. new_chip_name = "mullins";
  1970. pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
  1971. me_req_size = CIK_ME_UCODE_SIZE * 4;
  1972. ce_req_size = CIK_CE_UCODE_SIZE * 4;
  1973. mec_req_size = CIK_MEC_UCODE_SIZE * 4;
  1974. rlc_req_size = ML_RLC_UCODE_SIZE * 4;
  1975. sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
  1976. num_fw = 6;
  1977. break;
  1978. default: BUG();
  1979. }
  1980. DRM_INFO("Loading %s Microcode\n", new_chip_name);
  1981. snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
  1982. err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
  1983. if (err) {
  1984. snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
  1985. err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
  1986. if (err)
  1987. goto out;
  1988. if (rdev->pfp_fw->size != pfp_req_size) {
  1989. printk(KERN_ERR
  1990. "cik_cp: Bogus length %zu in firmware \"%s\"\n",
  1991. rdev->pfp_fw->size, fw_name);
  1992. err = -EINVAL;
  1993. goto out;
  1994. }
  1995. } else {
  1996. err = radeon_ucode_validate(rdev->pfp_fw);
  1997. if (err) {
  1998. printk(KERN_ERR
  1999. "cik_fw: validation failed for firmware \"%s\"\n",
  2000. fw_name);
  2001. goto out;
  2002. } else {
  2003. new_fw++;
  2004. }
  2005. }
  2006. snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
  2007. err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
  2008. if (err) {
  2009. snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
  2010. err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
  2011. if (err)
  2012. goto out;
  2013. if (rdev->me_fw->size != me_req_size) {
  2014. printk(KERN_ERR
  2015. "cik_cp: Bogus length %zu in firmware \"%s\"\n",
  2016. rdev->me_fw->size, fw_name);
  2017. err = -EINVAL;
  2018. }
  2019. } else {
  2020. err = radeon_ucode_validate(rdev->me_fw);
  2021. if (err) {
  2022. printk(KERN_ERR
  2023. "cik_fw: validation failed for firmware \"%s\"\n",
  2024. fw_name);
  2025. goto out;
  2026. } else {
  2027. new_fw++;
  2028. }
  2029. }
  2030. snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
  2031. err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
  2032. if (err) {
  2033. snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
  2034. err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
  2035. if (err)
  2036. goto out;
  2037. if (rdev->ce_fw->size != ce_req_size) {
  2038. printk(KERN_ERR
  2039. "cik_cp: Bogus length %zu in firmware \"%s\"\n",
  2040. rdev->ce_fw->size, fw_name);
  2041. err = -EINVAL;
  2042. }
  2043. } else {
  2044. err = radeon_ucode_validate(rdev->ce_fw);
  2045. if (err) {
  2046. printk(KERN_ERR
  2047. "cik_fw: validation failed for firmware \"%s\"\n",
  2048. fw_name);
  2049. goto out;
  2050. } else {
  2051. new_fw++;
  2052. }
  2053. }
  2054. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
  2055. err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
  2056. if (err) {
  2057. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
  2058. err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
  2059. if (err)
  2060. goto out;
  2061. if (rdev->mec_fw->size != mec_req_size) {
  2062. printk(KERN_ERR
  2063. "cik_cp: Bogus length %zu in firmware \"%s\"\n",
  2064. rdev->mec_fw->size, fw_name);
  2065. err = -EINVAL;
  2066. }
  2067. } else {
  2068. err = radeon_ucode_validate(rdev->mec_fw);
  2069. if (err) {
  2070. printk(KERN_ERR
  2071. "cik_fw: validation failed for firmware \"%s\"\n",
  2072. fw_name);
  2073. goto out;
  2074. } else {
  2075. new_fw++;
  2076. }
  2077. }
  2078. if (rdev->family == CHIP_KAVERI) {
  2079. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
  2080. err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
  2081. if (err) {
  2082. goto out;
  2083. } else {
  2084. err = radeon_ucode_validate(rdev->mec2_fw);
  2085. if (err) {
  2086. goto out;
  2087. } else {
  2088. new_fw++;
  2089. }
  2090. }
  2091. }
  2092. snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
  2093. err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
  2094. if (err) {
  2095. snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
  2096. err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
  2097. if (err)
  2098. goto out;
  2099. if (rdev->rlc_fw->size != rlc_req_size) {
  2100. printk(KERN_ERR
  2101. "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
  2102. rdev->rlc_fw->size, fw_name);
  2103. err = -EINVAL;
  2104. }
  2105. } else {
  2106. err = radeon_ucode_validate(rdev->rlc_fw);
  2107. if (err) {
  2108. printk(KERN_ERR
  2109. "cik_fw: validation failed for firmware \"%s\"\n",
  2110. fw_name);
  2111. goto out;
  2112. } else {
  2113. new_fw++;
  2114. }
  2115. }
  2116. snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
  2117. err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
  2118. if (err) {
  2119. snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
  2120. err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
  2121. if (err)
  2122. goto out;
  2123. if (rdev->sdma_fw->size != sdma_req_size) {
  2124. printk(KERN_ERR
  2125. "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
  2126. rdev->sdma_fw->size, fw_name);
  2127. err = -EINVAL;
  2128. }
  2129. } else {
  2130. err = radeon_ucode_validate(rdev->sdma_fw);
  2131. if (err) {
  2132. printk(KERN_ERR
  2133. "cik_fw: validation failed for firmware \"%s\"\n",
  2134. fw_name);
  2135. goto out;
  2136. } else {
  2137. new_fw++;
  2138. }
  2139. }
  2140. /* No SMC, MC ucode on APUs */
  2141. if (!(rdev->flags & RADEON_IS_IGP)) {
  2142. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
  2143. err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
  2144. if (err) {
  2145. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
  2146. err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
  2147. if (err) {
  2148. snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
  2149. err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
  2150. if (err)
  2151. goto out;
  2152. }
  2153. if ((rdev->mc_fw->size != mc_req_size) &&
  2154. (rdev->mc_fw->size != mc2_req_size)){
  2155. printk(KERN_ERR
  2156. "cik_mc: Bogus length %zu in firmware \"%s\"\n",
  2157. rdev->mc_fw->size, fw_name);
  2158. err = -EINVAL;
  2159. }
  2160. DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
  2161. } else {
  2162. err = radeon_ucode_validate(rdev->mc_fw);
  2163. if (err) {
  2164. printk(KERN_ERR
  2165. "cik_fw: validation failed for firmware \"%s\"\n",
  2166. fw_name);
  2167. goto out;
  2168. } else {
  2169. new_fw++;
  2170. }
  2171. }
  2172. snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
  2173. err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
  2174. if (err) {
  2175. snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
  2176. err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
  2177. if (err) {
  2178. printk(KERN_ERR
  2179. "smc: error loading firmware \"%s\"\n",
  2180. fw_name);
  2181. release_firmware(rdev->smc_fw);
  2182. rdev->smc_fw = NULL;
  2183. err = 0;
  2184. } else if (rdev->smc_fw->size != smc_req_size) {
  2185. printk(KERN_ERR
  2186. "cik_smc: Bogus length %zu in firmware \"%s\"\n",
  2187. rdev->smc_fw->size, fw_name);
  2188. err = -EINVAL;
  2189. }
  2190. } else {
  2191. err = radeon_ucode_validate(rdev->smc_fw);
  2192. if (err) {
  2193. printk(KERN_ERR
  2194. "cik_fw: validation failed for firmware \"%s\"\n",
  2195. fw_name);
  2196. goto out;
  2197. } else {
  2198. new_fw++;
  2199. }
  2200. }
  2201. }
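/*
 * Each image above was first requested under the new naming scheme and only
 * counted in new_fw when radeon_ucode_validate() accepted it; images that
 * fell back to the legacy names leave new_fw untouched. The driver therefore
 * requires all-or-nothing: either every image is new-format or every image
 * is legacy, never a mix.
 */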
  2202. if (new_fw == 0) {
  2203. rdev->new_fw = false;
  2204. } else if (new_fw < num_fw) {
  2205. printk(KERN_ERR "ci_fw: mixing new and old firmware!\n");
  2206. err = -EINVAL;
  2207. } else {
  2208. rdev->new_fw = true;
  2209. }
  2210. out:
  2211. if (err) {
  2212. if (err != -EINVAL)
  2213. printk(KERN_ERR
  2214. "cik_cp: Failed to load firmware \"%s\"\n",
  2215. fw_name);
  2216. release_firmware(rdev->pfp_fw);
  2217. rdev->pfp_fw = NULL;
  2218. release_firmware(rdev->me_fw);
  2219. rdev->me_fw = NULL;
  2220. release_firmware(rdev->ce_fw);
  2221. rdev->ce_fw = NULL;
  2222. release_firmware(rdev->mec_fw);
  2223. rdev->mec_fw = NULL;
  2224. release_firmware(rdev->mec2_fw);
  2225. rdev->mec2_fw = NULL;
  2226. release_firmware(rdev->rlc_fw);
  2227. rdev->rlc_fw = NULL;
  2228. release_firmware(rdev->sdma_fw);
  2229. rdev->sdma_fw = NULL;
  2230. release_firmware(rdev->mc_fw);
  2231. rdev->mc_fw = NULL;
  2232. release_firmware(rdev->smc_fw);
  2233. rdev->smc_fw = NULL;
  2234. }
  2235. return err;
  2236. }
  2237. /*
  2238. * Core functions
  2239. */
  2240. /**
  2241. * cik_tiling_mode_table_init - init the hw tiling table
  2242. *
  2243. * @rdev: radeon_device pointer
  2244. *
  2245. * Starting with SI, the tiling setup is done globally in a
  2246. * set of 32 tiling modes. Rather than selecting each set of
  2247. * parameters per surface as on older asics, we just select
  2248. * which index in the tiling table we want to use, and the
  2249. * surface uses those parameters (CIK).
  2250. */
  2251. static void cik_tiling_mode_table_init(struct radeon_device *rdev)
  2252. {
  2253. u32 *tile = rdev->config.cik.tile_mode_array;
  2254. u32 *macrotile = rdev->config.cik.macrotile_mode_array;
  2255. const u32 num_tile_mode_states =
  2256. ARRAY_SIZE(rdev->config.cik.tile_mode_array);
  2257. const u32 num_secondary_tile_mode_states =
  2258. ARRAY_SIZE(rdev->config.cik.macrotile_mode_array);
  2259. u32 reg_offset, split_equal_to_row_size;
  2260. u32 num_pipe_configs;
  2261. u32 num_rbs = rdev->config.cik.max_backends_per_se *
  2262. rdev->config.cik.max_shader_engines;
  2263. switch (rdev->config.cik.mem_row_size_in_kb) {
  2264. case 1:
  2265. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
  2266. break;
  2267. case 2:
  2268. default:
  2269. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
  2270. break;
  2271. case 4:
  2272. split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
  2273. break;
  2274. }
  2275. num_pipe_configs = rdev->config.cik.max_tile_pipes;
  2276. if (num_pipe_configs > 8)
  2277. num_pipe_configs = 16;
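/*
 * The tables below only cover 2, 4, 8 and 16 pipe configurations, so
 * anything above 8 pipes (e.g. Hawaii with 16 tile pipes) is mapped onto
 * the 16-pipe table.
 */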
  2278. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  2279. tile[reg_offset] = 0;
  2280. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  2281. macrotile[reg_offset] = 0;
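/*
 * Both tables were just cleared, so any index not explicitly programmed in
 * the per-pipe-config cases below (e.g. tile[15], tile[18]-tile[26],
 * macrotile[7]) is written out as 0 by the WREG32 loops at the end of each
 * case.
 */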
  2282. switch(num_pipe_configs) {
  2283. case 16:
  2284. tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2285. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2286. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2287. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2288. tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2289. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2290. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2291. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2292. tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2293. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2294. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2295. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2296. tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2297. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2298. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2299. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2300. tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2301. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2302. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2303. TILE_SPLIT(split_equal_to_row_size));
  2304. tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2305. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2306. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2307. tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2308. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2309. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2310. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2311. tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2312. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2313. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2314. TILE_SPLIT(split_equal_to_row_size));
  2315. tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2316. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
  2317. tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2318. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2319. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2320. tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2321. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2322. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2323. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2324. tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2325. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2326. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  2327. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2328. tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2329. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2330. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2331. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2332. tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2333. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2334. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2335. tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2336. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2337. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2338. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2339. tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2340. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2341. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  2342. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2343. tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2344. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2345. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2346. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2347. tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2348. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2349. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2350. tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2351. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2352. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2353. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2354. tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2355. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2356. PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
  2357. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2358. tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2359. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2360. PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
  2361. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2362. macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2363. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2364. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2365. NUM_BANKS(ADDR_SURF_16_BANK));
  2366. macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2367. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2368. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2369. NUM_BANKS(ADDR_SURF_16_BANK));
  2370. macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2371. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2372. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2373. NUM_BANKS(ADDR_SURF_16_BANK));
  2374. macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2375. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2376. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2377. NUM_BANKS(ADDR_SURF_16_BANK));
  2378. macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2379. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2380. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2381. NUM_BANKS(ADDR_SURF_8_BANK));
  2382. macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2383. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2384. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2385. NUM_BANKS(ADDR_SURF_4_BANK));
  2386. macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2387. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2388. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2389. NUM_BANKS(ADDR_SURF_2_BANK));
  2390. macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2391. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2392. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2393. NUM_BANKS(ADDR_SURF_16_BANK));
  2394. macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2395. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2396. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2397. NUM_BANKS(ADDR_SURF_16_BANK));
  2398. macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2399. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2400. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2401. NUM_BANKS(ADDR_SURF_16_BANK));
  2402. macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2403. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2404. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2405. NUM_BANKS(ADDR_SURF_8_BANK));
  2406. macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2407. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2408. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2409. NUM_BANKS(ADDR_SURF_4_BANK));
  2410. macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2411. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2412. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2413. NUM_BANKS(ADDR_SURF_2_BANK));
  2414. macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2415. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2416. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2417. NUM_BANKS(ADDR_SURF_2_BANK));
  2418. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  2419. WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
  2420. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  2421. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
  2422. break;
  2423. case 8:
  2424. tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2425. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2426. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2427. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2428. tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2429. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2430. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2431. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2432. tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2433. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2434. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2435. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2436. tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2437. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2438. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2439. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2440. tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2441. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2442. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2443. TILE_SPLIT(split_equal_to_row_size));
  2444. tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2445. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2446. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2447. tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2448. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2449. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2450. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2451. tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2452. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2453. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2454. TILE_SPLIT(split_equal_to_row_size));
  2455. tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2456. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
  2457. tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2458. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2459. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2460. tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2461. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2462. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2463. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2464. tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2465. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2466. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2467. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2468. tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2469. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2470. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2471. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2472. tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2473. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2474. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2475. tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2476. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2477. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2478. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2479. tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2480. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2481. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2482. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2483. tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2484. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2485. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2486. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2487. tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2488. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2489. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2490. tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2491. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2492. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2493. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2494. tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2495. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2496. PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
  2497. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2498. tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2499. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2500. PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
  2501. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2502. macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2503. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2504. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2505. NUM_BANKS(ADDR_SURF_16_BANK));
  2506. macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2507. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2508. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2509. NUM_BANKS(ADDR_SURF_16_BANK));
  2510. macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2511. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2512. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2513. NUM_BANKS(ADDR_SURF_16_BANK));
  2514. macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2515. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2516. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2517. NUM_BANKS(ADDR_SURF_16_BANK));
  2518. macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2519. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2520. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2521. NUM_BANKS(ADDR_SURF_8_BANK));
  2522. macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2523. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2524. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2525. NUM_BANKS(ADDR_SURF_4_BANK));
  2526. macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2527. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2528. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2529. NUM_BANKS(ADDR_SURF_2_BANK));
  2530. macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2531. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2532. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2533. NUM_BANKS(ADDR_SURF_16_BANK));
  2534. macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2535. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2536. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2537. NUM_BANKS(ADDR_SURF_16_BANK));
  2538. macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2539. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2540. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2541. NUM_BANKS(ADDR_SURF_16_BANK));
  2542. macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2543. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2544. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2545. NUM_BANKS(ADDR_SURF_16_BANK));
  2546. macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2547. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2548. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2549. NUM_BANKS(ADDR_SURF_8_BANK));
  2550. macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2551. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2552. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2553. NUM_BANKS(ADDR_SURF_4_BANK));
  2554. macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2555. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2556. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2557. NUM_BANKS(ADDR_SURF_2_BANK));
  2558. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  2559. WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
  2560. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  2561. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
  2562. break;
  2563. case 4:
  2564. if (num_rbs == 4) {
  2565. tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2566. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2567. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2568. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2569. tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2570. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2571. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2572. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2573. tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2574. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2575. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2576. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2577. tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2578. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2579. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2580. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2581. tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2582. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2583. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2584. TILE_SPLIT(split_equal_to_row_size));
  2585. tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2586. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2587. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2588. tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2589. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2590. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2591. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2592. tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2593. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2594. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2595. TILE_SPLIT(split_equal_to_row_size));
  2596. tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2597. PIPE_CONFIG(ADDR_SURF_P4_16x16));
  2598. tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2599. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2600. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2601. tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2602. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2603. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2604. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2605. tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2606. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2607. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2608. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2609. tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2610. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2611. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2612. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2613. tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2614. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2615. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2616. tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2617. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2618. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2619. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2620. tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2621. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2622. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2623. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2624. tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2625. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2626. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2627. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2628. tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2629. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2630. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2631. tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2632. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2633. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2634. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2635. tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2636. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2637. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2638. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2639. tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2640. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2641. PIPE_CONFIG(ADDR_SURF_P4_16x16) |
  2642. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2643. } else if (num_rbs < 4) {
  2644. tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2645. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2646. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2647. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2648. tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2649. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2650. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2651. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2652. tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2653. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2654. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2655. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2656. tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2657. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2658. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2659. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2660. tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2661. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2662. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2663. TILE_SPLIT(split_equal_to_row_size));
  2664. tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2665. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2666. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2667. tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2668. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2669. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2670. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2671. tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2672. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2673. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2674. TILE_SPLIT(split_equal_to_row_size));
  2675. tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2676. PIPE_CONFIG(ADDR_SURF_P4_8x16));
  2677. tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2678. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2679. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
  2680. tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2681. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2682. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2683. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2684. tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2685. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2686. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2687. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2688. tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2689. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2690. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2691. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2692. tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2693. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2694. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2695. tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2696. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2697. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2698. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2699. tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2700. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2701. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2702. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2703. tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2704. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2705. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2706. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2707. tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2708. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2709. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
  2710. tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2711. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2712. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2713. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2714. tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2715. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2716. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2717. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2718. tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2719. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2720. PIPE_CONFIG(ADDR_SURF_P4_8x16) |
  2721. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2722. }
  2723. macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2724. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2725. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2726. NUM_BANKS(ADDR_SURF_16_BANK));
  2727. macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2728. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2729. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2730. NUM_BANKS(ADDR_SURF_16_BANK));
  2731. macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2732. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2733. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2734. NUM_BANKS(ADDR_SURF_16_BANK));
  2735. macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2736. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2737. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2738. NUM_BANKS(ADDR_SURF_16_BANK));
  2739. macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2740. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2741. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2742. NUM_BANKS(ADDR_SURF_16_BANK));
  2743. macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2744. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2745. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2746. NUM_BANKS(ADDR_SURF_8_BANK));
  2747. macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2748. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2749. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2750. NUM_BANKS(ADDR_SURF_4_BANK));
  2751. macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2752. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2753. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2754. NUM_BANKS(ADDR_SURF_16_BANK));
  2755. macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2756. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2757. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2758. NUM_BANKS(ADDR_SURF_16_BANK));
  2759. macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2760. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2761. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2762. NUM_BANKS(ADDR_SURF_16_BANK));
  2763. macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2764. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2765. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2766. NUM_BANKS(ADDR_SURF_16_BANK));
  2767. macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2768. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2769. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2770. NUM_BANKS(ADDR_SURF_16_BANK));
  2771. macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2772. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2773. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2774. NUM_BANKS(ADDR_SURF_8_BANK));
  2775. macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2776. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2777. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
  2778. NUM_BANKS(ADDR_SURF_4_BANK));
  2779. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  2780. WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
  2781. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  2782. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
  2783. break;
  2784. case 2:
  2785. tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2786. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2787. PIPE_CONFIG(ADDR_SURF_P2) |
  2788. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
  2789. tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2790. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2791. PIPE_CONFIG(ADDR_SURF_P2) |
  2792. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
  2793. tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2794. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2795. PIPE_CONFIG(ADDR_SURF_P2) |
  2796. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2797. tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2798. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2799. PIPE_CONFIG(ADDR_SURF_P2) |
  2800. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
  2801. tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2802. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2803. PIPE_CONFIG(ADDR_SURF_P2) |
  2804. TILE_SPLIT(split_equal_to_row_size));
  2805. tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2806. PIPE_CONFIG(ADDR_SURF_P2) |
  2807. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
  2808. tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2809. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2810. PIPE_CONFIG(ADDR_SURF_P2) |
  2811. TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
  2812. tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2813. MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
  2814. PIPE_CONFIG(ADDR_SURF_P2) |
  2815. TILE_SPLIT(split_equal_to_row_size));
  2816. tile[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
  2817. PIPE_CONFIG(ADDR_SURF_P2);
  2818. tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2819. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2820. PIPE_CONFIG(ADDR_SURF_P2));
  2821. tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2822. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2823. PIPE_CONFIG(ADDR_SURF_P2) |
  2824. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2825. tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2826. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2827. PIPE_CONFIG(ADDR_SURF_P2) |
  2828. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2829. tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2830. MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
  2831. PIPE_CONFIG(ADDR_SURF_P2) |
  2832. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2833. tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2834. PIPE_CONFIG(ADDR_SURF_P2) |
  2835. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
  2836. tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
  2837. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2838. PIPE_CONFIG(ADDR_SURF_P2) |
  2839. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2840. tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2841. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2842. PIPE_CONFIG(ADDR_SURF_P2) |
  2843. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2844. tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2845. MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
  2846. PIPE_CONFIG(ADDR_SURF_P2) |
  2847. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2848. tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
  2849. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2850. PIPE_CONFIG(ADDR_SURF_P2));
  2851. tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2852. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2853. PIPE_CONFIG(ADDR_SURF_P2) |
  2854. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2855. tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
  2856. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2857. PIPE_CONFIG(ADDR_SURF_P2) |
  2858. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2859. tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
  2860. MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
  2861. PIPE_CONFIG(ADDR_SURF_P2) |
  2862. SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
  2863. macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2864. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2865. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2866. NUM_BANKS(ADDR_SURF_16_BANK));
  2867. macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2868. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2869. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2870. NUM_BANKS(ADDR_SURF_16_BANK));
  2871. macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2872. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2873. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2874. NUM_BANKS(ADDR_SURF_16_BANK));
  2875. macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2876. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2877. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2878. NUM_BANKS(ADDR_SURF_16_BANK));
  2879. macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2880. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2881. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2882. NUM_BANKS(ADDR_SURF_16_BANK));
  2883. macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2884. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2885. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2886. NUM_BANKS(ADDR_SURF_16_BANK));
  2887. macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2888. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2889. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2890. NUM_BANKS(ADDR_SURF_8_BANK));
  2891. macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2892. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
  2893. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2894. NUM_BANKS(ADDR_SURF_16_BANK));
  2895. macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
  2896. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2897. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2898. NUM_BANKS(ADDR_SURF_16_BANK));
  2899. macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2900. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
  2901. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2902. NUM_BANKS(ADDR_SURF_16_BANK));
  2903. macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
  2904. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2905. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2906. NUM_BANKS(ADDR_SURF_16_BANK));
  2907. macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2908. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
  2909. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2910. NUM_BANKS(ADDR_SURF_16_BANK));
  2911. macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2912. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2913. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
  2914. NUM_BANKS(ADDR_SURF_16_BANK));
  2915. macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
  2916. BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
  2917. MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
  2918. NUM_BANKS(ADDR_SURF_8_BANK));
  2919. for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
  2920. WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
  2921. for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
  2922. WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
  2923. break;
  2924. default:
  2925. DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
  2926. }
  2927. }
  2928. /**
  2929. * cik_select_se_sh - select which SE, SH to address
  2930. *
  2931. * @rdev: radeon_device pointer
  2932. * @se_num: shader engine to address
  2933. * @sh_num: sh block to address
  2934. *
  2935. * Select which SE, SH combinations to address. Certain
  2936. * registers are instanced per SE or SH. 0xffffffff means
  2937. * broadcast to all SEs or SHs (CIK).
  2938. */
  2939. static void cik_select_se_sh(struct radeon_device *rdev,
  2940. u32 se_num, u32 sh_num)
  2941. {
  2942. u32 data = INSTANCE_BROADCAST_WRITES;
  2943. if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
  2944. data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
  2945. else if (se_num == 0xffffffff)
  2946. data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
  2947. else if (sh_num == 0xffffffff)
  2948. data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
  2949. else
  2950. data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
  2951. WREG32(GRBM_GFX_INDEX, data);
  2952. }
  2953. /**
  2954. * cik_create_bitmask - create a bitmask
  2955. *
  2956. * @bit_width: length of the mask
  2957. *
  2958. * create a variable length bit mask (CIK).
  2959. * Returns the bitmask.
  2960. */
  2961. static u32 cik_create_bitmask(u32 bit_width)
  2962. {
  2963. u32 i, mask = 0;
  2964. for (i = 0; i < bit_width; i++) {
  2965. mask <<= 1;
  2966. mask |= 1;
  2967. }
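/* e.g. cik_create_bitmask(4) == 0xf; equivalent to (1 << bit_width) - 1
 * for bit_width < 32.
 */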
  2968. return mask;
  2969. }
  2970. /**
  2971. * cik_get_rb_disabled - computes the mask of disabled RBs
  2972. *
  2973. * @rdev: radeon_device pointer
2974. * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
2975. * @sh_per_se: number of SH blocks per SE for the asic
  2977. *
  2978. * Calculates the bitmask of disabled RBs (CIK).
  2979. * Returns the disabled RB bitmask.
  2980. */
  2981. static u32 cik_get_rb_disabled(struct radeon_device *rdev,
  2982. u32 max_rb_num_per_se,
  2983. u32 sh_per_se)
  2984. {
  2985. u32 data, mask;
  2986. data = RREG32(CC_RB_BACKEND_DISABLE);
  2987. if (data & 1)
  2988. data &= BACKEND_DISABLE_MASK;
  2989. else
  2990. data = 0;
  2991. data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
  2992. data >>= BACKEND_DISABLE_SHIFT;
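/*
 * data now holds the backends fused off in hardware (CC_RB_BACKEND_DISABLE,
 * only honoured when bit 0 of that register was set) ORed with the
 * user/driver-disabled ones (GC_USER_RB_BACKEND_DISABLE); mask it down to
 * the RBs owned by a single SH.
 */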
  2993. mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
  2994. return data & mask;
  2995. }
  2996. /**
  2997. * cik_setup_rb - setup the RBs on the asic
  2998. *
  2999. * @rdev: radeon_device pointer
  3000. * @se_num: number of SEs (shader engines) for the asic
  3001. * @sh_per_se: number of SH blocks per SE for the asic
3002. * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
  3003. *
  3004. * Configures per-SE/SH RB registers (CIK).
  3005. */
  3006. static void cik_setup_rb(struct radeon_device *rdev,
  3007. u32 se_num, u32 sh_per_se,
  3008. u32 max_rb_num_per_se)
  3009. {
  3010. int i, j;
  3011. u32 data, mask;
  3012. u32 disabled_rbs = 0;
  3013. u32 enabled_rbs = 0;
  3014. mutex_lock(&rdev->grbm_idx_mutex);
  3015. for (i = 0; i < se_num; i++) {
  3016. for (j = 0; j < sh_per_se; j++) {
  3017. cik_select_se_sh(rdev, i, j);
  3018. data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
  3019. if (rdev->family == CHIP_HAWAII)
  3020. disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
  3021. else
  3022. disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
  3023. }
  3024. }
  3025. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  3026. mutex_unlock(&rdev->grbm_idx_mutex);
  3027. mask = 1;
  3028. for (i = 0; i < max_rb_num_per_se * se_num; i++) {
  3029. if (!(disabled_rbs & mask))
  3030. enabled_rbs |= mask;
  3031. mask <<= 1;
  3032. }
  3033. rdev->config.cik.backend_enable_mask = enabled_rbs;
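/*
 * Example: with 4 RBs total and disabled_rbs == 0x2, the loop above yields
 * enabled_rbs == 0xd (every RB not marked disabled).
 */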
  3034. mutex_lock(&rdev->grbm_idx_mutex);
  3035. for (i = 0; i < se_num; i++) {
  3036. cik_select_se_sh(rdev, i, 0xffffffff);
  3037. data = 0;
  3038. for (j = 0; j < sh_per_se; j++) {
  3039. switch (enabled_rbs & 3) {
  3040. case 0:
  3041. if (j == 0)
  3042. data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
  3043. else
  3044. data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
  3045. break;
  3046. case 1:
  3047. data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
  3048. break;
  3049. case 2:
  3050. data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
  3051. break;
  3052. case 3:
  3053. default:
  3054. data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
  3055. break;
  3056. }
  3057. enabled_rbs >>= 2;
  3058. }
  3059. WREG32(PA_SC_RASTER_CONFIG, data);
  3060. }
  3061. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  3062. mutex_unlock(&rdev->grbm_idx_mutex);
  3063. }
  3064. /**
  3065. * cik_gpu_init - setup the 3D engine
  3066. *
  3067. * @rdev: radeon_device pointer
  3068. *
  3069. * Configures the 3D engine and tiling configuration
  3070. * registers so that the 3D engine is usable.
  3071. */
  3072. static void cik_gpu_init(struct radeon_device *rdev)
  3073. {
  3074. u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
  3075. u32 mc_shared_chmap, mc_arb_ramcfg;
  3076. u32 hdp_host_path_cntl;
  3077. u32 tmp;
  3078. int i, j;
  3079. switch (rdev->family) {
  3080. case CHIP_BONAIRE:
  3081. rdev->config.cik.max_shader_engines = 2;
  3082. rdev->config.cik.max_tile_pipes = 4;
  3083. rdev->config.cik.max_cu_per_sh = 7;
  3084. rdev->config.cik.max_sh_per_se = 1;
  3085. rdev->config.cik.max_backends_per_se = 2;
  3086. rdev->config.cik.max_texture_channel_caches = 4;
  3087. rdev->config.cik.max_gprs = 256;
  3088. rdev->config.cik.max_gs_threads = 32;
  3089. rdev->config.cik.max_hw_contexts = 8;
  3090. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3091. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3092. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3093. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3094. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3095. break;
  3096. case CHIP_HAWAII:
  3097. rdev->config.cik.max_shader_engines = 4;
  3098. rdev->config.cik.max_tile_pipes = 16;
  3099. rdev->config.cik.max_cu_per_sh = 11;
  3100. rdev->config.cik.max_sh_per_se = 1;
  3101. rdev->config.cik.max_backends_per_se = 4;
  3102. rdev->config.cik.max_texture_channel_caches = 16;
  3103. rdev->config.cik.max_gprs = 256;
  3104. rdev->config.cik.max_gs_threads = 32;
  3105. rdev->config.cik.max_hw_contexts = 8;
  3106. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3107. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3108. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3109. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3110. gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
  3111. break;
  3112. case CHIP_KAVERI:
  3113. rdev->config.cik.max_shader_engines = 1;
  3114. rdev->config.cik.max_tile_pipes = 4;
  3115. if ((rdev->pdev->device == 0x1304) ||
  3116. (rdev->pdev->device == 0x1305) ||
  3117. (rdev->pdev->device == 0x130C) ||
  3118. (rdev->pdev->device == 0x130F) ||
  3119. (rdev->pdev->device == 0x1310) ||
  3120. (rdev->pdev->device == 0x1311) ||
  3121. (rdev->pdev->device == 0x131C)) {
  3122. rdev->config.cik.max_cu_per_sh = 8;
  3123. rdev->config.cik.max_backends_per_se = 2;
  3124. } else if ((rdev->pdev->device == 0x1309) ||
  3125. (rdev->pdev->device == 0x130A) ||
  3126. (rdev->pdev->device == 0x130D) ||
  3127. (rdev->pdev->device == 0x1313) ||
  3128. (rdev->pdev->device == 0x131D)) {
  3129. rdev->config.cik.max_cu_per_sh = 6;
  3130. rdev->config.cik.max_backends_per_se = 2;
  3131. } else if ((rdev->pdev->device == 0x1306) ||
  3132. (rdev->pdev->device == 0x1307) ||
  3133. (rdev->pdev->device == 0x130B) ||
  3134. (rdev->pdev->device == 0x130E) ||
  3135. (rdev->pdev->device == 0x1315) ||
  3136. (rdev->pdev->device == 0x1318) ||
  3137. (rdev->pdev->device == 0x131B)) {
  3138. rdev->config.cik.max_cu_per_sh = 4;
  3139. rdev->config.cik.max_backends_per_se = 1;
  3140. } else {
  3141. rdev->config.cik.max_cu_per_sh = 3;
  3142. rdev->config.cik.max_backends_per_se = 1;
  3143. }
  3144. rdev->config.cik.max_sh_per_se = 1;
  3145. rdev->config.cik.max_texture_channel_caches = 4;
  3146. rdev->config.cik.max_gprs = 256;
  3147. rdev->config.cik.max_gs_threads = 16;
  3148. rdev->config.cik.max_hw_contexts = 8;
  3149. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3150. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3151. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3152. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3153. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3154. break;
  3155. case CHIP_KABINI:
  3156. case CHIP_MULLINS:
  3157. default:
  3158. rdev->config.cik.max_shader_engines = 1;
  3159. rdev->config.cik.max_tile_pipes = 2;
  3160. rdev->config.cik.max_cu_per_sh = 2;
  3161. rdev->config.cik.max_sh_per_se = 1;
  3162. rdev->config.cik.max_backends_per_se = 1;
  3163. rdev->config.cik.max_texture_channel_caches = 2;
  3164. rdev->config.cik.max_gprs = 256;
  3165. rdev->config.cik.max_gs_threads = 16;
  3166. rdev->config.cik.max_hw_contexts = 8;
  3167. rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
  3168. rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
  3169. rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
  3170. rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
  3171. gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
  3172. break;
  3173. }
  3174. /* Initialize HDP */
  3175. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  3176. WREG32((0x2c14 + j), 0x00000000);
  3177. WREG32((0x2c18 + j), 0x00000000);
  3178. WREG32((0x2c1c + j), 0x00000000);
  3179. WREG32((0x2c20 + j), 0x00000000);
  3180. WREG32((0x2c24 + j), 0x00000000);
  3181. }
  3182. WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
  3183. WREG32(SRBM_INT_CNTL, 0x1);
  3184. WREG32(SRBM_INT_ACK, 0x1);
  3185. WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
  3186. mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
  3187. mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
  3188. rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
  3189. rdev->config.cik.mem_max_burst_length_bytes = 256;
  3190. tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
  3191. rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
  3192. if (rdev->config.cik.mem_row_size_in_kb > 4)
  3193. rdev->config.cik.mem_row_size_in_kb = 4;
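/*
 * The NOOFCOLS field encodes the DRAM column count: the row size computed
 * above is 4 * 2^(8 + tmp) bytes, i.e. tmp = 0 -> 1KB, 1 -> 2KB, 2 -> 4KB,
 * and is then clamped to 4KB.
 */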
  3194. /* XXX use MC settings? */
  3195. rdev->config.cik.shader_engine_tile_size = 32;
  3196. rdev->config.cik.num_gpus = 1;
  3197. rdev->config.cik.multi_gpu_tile_size = 64;
  3198. /* fix up row size */
  3199. gb_addr_config &= ~ROW_SIZE_MASK;
  3200. switch (rdev->config.cik.mem_row_size_in_kb) {
  3201. case 1:
  3202. default:
  3203. gb_addr_config |= ROW_SIZE(0);
  3204. break;
  3205. case 2:
  3206. gb_addr_config |= ROW_SIZE(1);
  3207. break;
  3208. case 4:
  3209. gb_addr_config |= ROW_SIZE(2);
  3210. break;
  3211. }
  3212. /* setup tiling info dword. gb_addr_config is not adequate since it does
  3213. * not have bank info, so create a custom tiling dword.
  3214. * bits 3:0 num_pipes
  3215. * bits 7:4 num_banks
  3216. * bits 11:8 group_size
  3217. * bits 15:12 row_size
  3218. */
  3219. rdev->config.cik.tile_config = 0;
  3220. switch (rdev->config.cik.num_tile_pipes) {
  3221. case 1:
  3222. rdev->config.cik.tile_config |= (0 << 0);
  3223. break;
  3224. case 2:
  3225. rdev->config.cik.tile_config |= (1 << 0);
  3226. break;
  3227. case 4:
  3228. rdev->config.cik.tile_config |= (2 << 0);
  3229. break;
  3230. case 8:
  3231. default:
  3232. /* XXX what about 12? */
  3233. rdev->config.cik.tile_config |= (3 << 0);
  3234. break;
  3235. }
  3236. rdev->config.cik.tile_config |=
  3237. ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
  3238. rdev->config.cik.tile_config |=
  3239. ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
  3240. rdev->config.cik.tile_config |=
  3241. ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
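/*
 * Example: a 4-pipe part stores 2 in bits 3:0; bits 7:4, 11:8 and 15:12 are
 * copied straight from the MC_ARB_RAMCFG bank field and the GB_ADDR_CONFIG
 * pipe-interleave and row-size fields respectively.
 */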
  3242. WREG32(GB_ADDR_CONFIG, gb_addr_config);
  3243. WREG32(HDP_ADDR_CONFIG, gb_addr_config);
  3244. WREG32(DMIF_ADDR_CALC, gb_addr_config);
  3245. WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
  3246. WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
  3247. WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
  3248. WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
  3249. WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
  3250. cik_tiling_mode_table_init(rdev);
  3251. cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
  3252. rdev->config.cik.max_sh_per_se,
  3253. rdev->config.cik.max_backends_per_se);
  3254. rdev->config.cik.active_cus = 0;
  3255. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  3256. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  3257. rdev->config.cik.active_cus +=
  3258. hweight32(cik_get_cu_active_bitmap(rdev, i, j));
  3259. }
  3260. }
  3261. /* set HW defaults for 3D engine */
  3262. WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
  3263. mutex_lock(&rdev->grbm_idx_mutex);
  3264. /*
  3265. * making sure that the following register writes will be broadcasted
  3266. * to all the shaders
  3267. */
  3268. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  3269. WREG32(SX_DEBUG_1, 0x20);
  3270. WREG32(TA_CNTL_AUX, 0x00010000);
  3271. tmp = RREG32(SPI_CONFIG_CNTL);
  3272. tmp |= 0x03000000;
  3273. WREG32(SPI_CONFIG_CNTL, tmp);
  3274. WREG32(SQ_CONFIG, 1);
  3275. WREG32(DB_DEBUG, 0);
  3276. tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
  3277. tmp |= 0x00000400;
  3278. WREG32(DB_DEBUG2, tmp);
  3279. tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
  3280. tmp |= 0x00020200;
  3281. WREG32(DB_DEBUG3, tmp);
  3282. tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
  3283. tmp |= 0x00018208;
  3284. WREG32(CB_HW_CONTROL, tmp);
  3285. WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
  3286. WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
  3287. SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
  3288. SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
  3289. SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
  3290. WREG32(VGT_NUM_INSTANCES, 1);
  3291. WREG32(CP_PERFMON_CNTL, 0);
  3292. WREG32(SQ_CONFIG, 0);
  3293. WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
  3294. FORCE_EOV_MAX_REZ_CNT(255)));
  3295. WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
  3296. AUTO_INVLD_EN(ES_AND_GS_AUTO));
  3297. WREG32(VGT_GS_VERTEX_REUSE, 16);
  3298. WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
  3299. tmp = RREG32(HDP_MISC_CNTL);
  3300. tmp |= HDP_FLUSH_INVALIDATE_CACHE;
  3301. WREG32(HDP_MISC_CNTL, tmp);
  3302. hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
  3303. WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
  3304. WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
  3305. WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
  3306. mutex_unlock(&rdev->grbm_idx_mutex);
  3307. udelay(50);
  3308. }
  3309. /*
  3310. * GPU scratch registers helpers function.
  3311. */
  3312. /**
  3313. * cik_scratch_init - setup driver info for CP scratch regs
  3314. *
  3315. * @rdev: radeon_device pointer
  3316. *
  3317. * Set up the number and offset of the CP scratch registers.
3318. * NOTE: use of CP scratch registers is a legacy interface and
  3319. * is not used by default on newer asics (r6xx+). On newer asics,
  3320. * memory buffers are used for fences rather than scratch regs.
  3321. */
  3322. static void cik_scratch_init(struct radeon_device *rdev)
  3323. {
  3324. int i;
  3325. rdev->scratch.num_reg = 7;
  3326. rdev->scratch.reg_base = SCRATCH_REG0;
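/*
 * Seven consecutive 32-bit registers starting at SCRATCH_REG0, handed out
 * to callers such as cik_ring_test() below via radeon_scratch_get().
 */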
  3327. for (i = 0; i < rdev->scratch.num_reg; i++) {
  3328. rdev->scratch.free[i] = true;
  3329. rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
  3330. }
  3331. }
  3332. /**
  3333. * cik_ring_test - basic gfx ring test
  3334. *
  3335. * @rdev: radeon_device pointer
  3336. * @ring: radeon_ring structure holding ring information
  3337. *
  3338. * Allocate a scratch register and write to it using the gfx ring (CIK).
  3339. * Provides a basic gfx ring test to verify that the ring is working.
3340. * Used by cik_cp_gfx_resume().
  3341. * Returns 0 on success, error on failure.
  3342. */
  3343. int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  3344. {
  3345. uint32_t scratch;
  3346. uint32_t tmp = 0;
  3347. unsigned i;
  3348. int r;
  3349. r = radeon_scratch_get(rdev, &scratch);
  3350. if (r) {
  3351. DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
  3352. return r;
  3353. }
  3354. WREG32(scratch, 0xCAFEDEAD);
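/* Seed the scratch register with a sentinel so a stale readback cannot be
 * mistaken for the 0xDEADBEEF the ring is about to write.
 */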
  3355. r = radeon_ring_lock(rdev, ring, 3);
  3356. if (r) {
  3357. DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
  3358. radeon_scratch_free(rdev, scratch);
  3359. return r;
  3360. }
  3361. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  3362. radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
  3363. radeon_ring_write(ring, 0xDEADBEEF);
  3364. radeon_ring_unlock_commit(rdev, ring, false);
  3365. for (i = 0; i < rdev->usec_timeout; i++) {
  3366. tmp = RREG32(scratch);
  3367. if (tmp == 0xDEADBEEF)
  3368. break;
  3369. DRM_UDELAY(1);
  3370. }
  3371. if (i < rdev->usec_timeout) {
  3372. DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
  3373. } else {
  3374. DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
  3375. ring->idx, scratch, tmp);
  3376. r = -EINVAL;
  3377. }
  3378. radeon_scratch_free(rdev, scratch);
  3379. return r;
  3380. }
  3381. /**
  3382. * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
  3383. *
  3384. * @rdev: radeon_device pointer
  3385. * @ridx: radeon ring index
  3386. *
  3387. * Emits an hdp flush on the cp.
  3388. */
  3389. static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
  3390. int ridx)
  3391. {
  3392. struct radeon_ring *ring = &rdev->ring[ridx];
  3393. u32 ref_and_mask;
  3394. switch (ring->idx) {
  3395. case CAYMAN_RING_TYPE_CP1_INDEX:
  3396. case CAYMAN_RING_TYPE_CP2_INDEX:
  3397. default:
  3398. switch (ring->me) {
  3399. case 0:
  3400. ref_and_mask = CP2 << ring->pipe;
  3401. break;
  3402. case 1:
  3403. ref_and_mask = CP6 << ring->pipe;
  3404. break;
  3405. default:
  3406. return;
  3407. }
  3408. break;
  3409. case RADEON_RING_TYPE_GFX_INDEX:
  3410. ref_and_mask = CP0;
  3411. break;
  3412. }
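/*
 * The WAIT_REG_MEM below effectively writes ref_and_mask to
 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the matching
 * bit is set, i.e. the CP performs the HDP flush handshake on the PFP.
 */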
  3413. radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  3414. radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
  3415. WAIT_REG_MEM_FUNCTION(3) | /* == */
  3416. WAIT_REG_MEM_ENGINE(1))); /* pfp */
  3417. radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
  3418. radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
  3419. radeon_ring_write(ring, ref_and_mask);
  3420. radeon_ring_write(ring, ref_and_mask);
  3421. radeon_ring_write(ring, 0x20); /* poll interval */
  3422. }
  3423. /**
  3424. * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
  3425. *
  3426. * @rdev: radeon_device pointer
  3427. * @fence: radeon fence object
  3428. *
3429. * Emits a fence sequence number on the gfx ring and flushes
  3430. * GPU caches.
  3431. */
  3432. void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
  3433. struct radeon_fence *fence)
  3434. {
  3435. struct radeon_ring *ring = &rdev->ring[fence->ring];
  3436. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  3437. /* Workaround for cache flush problems. First send a dummy EOP
  3438. * event down the pipe with seq one below.
  3439. */
  3440. radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  3441. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  3442. EOP_TC_ACTION_EN |
  3443. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  3444. EVENT_INDEX(5)));
  3445. radeon_ring_write(ring, addr & 0xfffffffc);
  3446. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
  3447. DATA_SEL(1) | INT_SEL(0));
  3448. radeon_ring_write(ring, fence->seq - 1);
  3449. radeon_ring_write(ring, 0);
  3450. /* Then send the real EOP event down the pipe. */
  3451. radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
  3452. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  3453. EOP_TC_ACTION_EN |
  3454. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  3455. EVENT_INDEX(5)));
  3456. radeon_ring_write(ring, addr & 0xfffffffc);
  3457. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
  3458. radeon_ring_write(ring, fence->seq);
  3459. radeon_ring_write(ring, 0);
  3460. }
  3461. /**
  3462. * cik_fence_compute_ring_emit - emit a fence on the compute ring
  3463. *
  3464. * @rdev: radeon_device pointer
  3465. * @fence: radeon fence object
  3466. *
3467. * Emits a fence sequence number on the compute ring and flushes
  3468. * GPU caches.
  3469. */
  3470. void cik_fence_compute_ring_emit(struct radeon_device *rdev,
  3471. struct radeon_fence *fence)
  3472. {
  3473. struct radeon_ring *ring = &rdev->ring[fence->ring];
  3474. u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
  3475. /* RELEASE_MEM - flush caches, send int */
  3476. radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
  3477. radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
  3478. EOP_TC_ACTION_EN |
  3479. EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
  3480. EVENT_INDEX(5)));
  3481. radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
  3482. radeon_ring_write(ring, addr & 0xfffffffc);
  3483. radeon_ring_write(ring, upper_32_bits(addr));
  3484. radeon_ring_write(ring, fence->seq);
  3485. radeon_ring_write(ring, 0);
  3486. }
  3487. /**
  3488. * cik_semaphore_ring_emit - emit a semaphore on the CP ring
  3489. *
  3490. * @rdev: radeon_device pointer
  3491. * @ring: radeon ring buffer object
  3492. * @semaphore: radeon semaphore object
3493. * @emit_wait: Is this a semaphore wait?
  3494. *
  3495. * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
  3496. * from running ahead of semaphore waits.
  3497. */
  3498. bool cik_semaphore_ring_emit(struct radeon_device *rdev,
  3499. struct radeon_ring *ring,
  3500. struct radeon_semaphore *semaphore,
  3501. bool emit_wait)
  3502. {
  3503. uint64_t addr = semaphore->gpu_addr;
  3504. unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
  3505. radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
  3506. radeon_ring_write(ring, lower_32_bits(addr));
  3507. radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
  3508. if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
  3509. /* Prevent the PFP from running ahead of the semaphore wait */
  3510. radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  3511. radeon_ring_write(ring, 0x0);
  3512. }
  3513. return true;
  3514. }
  3515. /**
  3516. * cik_copy_cpdma - copy pages using the CP DMA engine
  3517. *
  3518. * @rdev: radeon_device pointer
  3519. * @src_offset: src GPU address
  3520. * @dst_offset: dst GPU address
  3521. * @num_gpu_pages: number of GPU pages to xfer
  3522. * @resv: reservation object to sync to
  3523. *
3524. * Copy GPU pages using the CP DMA engine (CIK+).
  3525. * Used by the radeon ttm implementation to move pages if
  3526. * registered as the asic copy callback.
  3527. */
  3528. struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
  3529. uint64_t src_offset, uint64_t dst_offset,
  3530. unsigned num_gpu_pages,
  3531. struct reservation_object *resv)
  3532. {
  3533. struct radeon_fence *fence;
  3534. struct radeon_sync sync;
  3535. int ring_index = rdev->asic->copy.blit_ring_index;
  3536. struct radeon_ring *ring = &rdev->ring[ring_index];
  3537. u32 size_in_bytes, cur_size_in_bytes, control;
  3538. int i, num_loops;
  3539. int r = 0;
  3540. radeon_sync_create(&sync);
  3541. size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
  3542. num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
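/* Each loop below emits a 7 dword DMA_DATA packet (header plus 6 payload
 * dwords); the extra 18 dwords reserved here leave room for the sync and
 * fence packets. E.g. an 8 MiB copy needs
 * DIV_ROUND_UP(8 << 20, 0x1fffff) = 5 loops.
 */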
  3543. r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
  3544. if (r) {
  3545. DRM_ERROR("radeon: moving bo (%d).\n", r);
  3546. radeon_sync_free(rdev, &sync, NULL);
  3547. return ERR_PTR(r);
  3548. }
  3549. radeon_sync_resv(rdev, &sync, resv, false);
  3550. radeon_sync_rings(rdev, &sync, ring->idx);
  3551. for (i = 0; i < num_loops; i++) {
  3552. cur_size_in_bytes = size_in_bytes;
  3553. if (cur_size_in_bytes > 0x1fffff)
  3554. cur_size_in_bytes = 0x1fffff;
  3555. size_in_bytes -= cur_size_in_bytes;
  3556. control = 0;
  3557. if (size_in_bytes == 0)
  3558. control |= PACKET3_DMA_DATA_CP_SYNC;
  3559. radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
  3560. radeon_ring_write(ring, control);
  3561. radeon_ring_write(ring, lower_32_bits(src_offset));
  3562. radeon_ring_write(ring, upper_32_bits(src_offset));
  3563. radeon_ring_write(ring, lower_32_bits(dst_offset));
  3564. radeon_ring_write(ring, upper_32_bits(dst_offset));
  3565. radeon_ring_write(ring, cur_size_in_bytes);
  3566. src_offset += cur_size_in_bytes;
  3567. dst_offset += cur_size_in_bytes;
  3568. }
  3569. r = radeon_fence_emit(rdev, &fence, ring->idx);
  3570. if (r) {
  3571. radeon_ring_unlock_undo(rdev, ring);
  3572. radeon_sync_free(rdev, &sync, NULL);
  3573. return ERR_PTR(r);
  3574. }
  3575. radeon_ring_unlock_commit(rdev, ring, false);
  3576. radeon_sync_free(rdev, &sync, fence);
  3577. return fence;
  3578. }
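/* Usage sketch (illustrative only; the call sequence below is an assumed
 * typical caller, not taken from this file). Callers get a fence back and
 * wait on it, roughly:
 *
 *   fence = cik_copy_cpdma(rdev, src_gpu, dst_gpu, npages, resv);
 *   if (!IS_ERR(fence))
 *           r = radeon_fence_wait(fence, false);
 *   radeon_fence_unref(&fence);
 */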
  3579. /*
  3580. * IB stuff
  3581. */
  3582. /**
  3583. * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
  3584. *
  3585. * @rdev: radeon_device pointer
  3586. * @ib: radeon indirect buffer object
  3587. *
  3588. * Emits a DE (drawing engine) or CE (constant engine) IB
  3589. * on the gfx ring. IBs are usually generated by userspace
  3590. * acceleration drivers and submitted to the kernel for
  3591. * scheduling on the ring. This function schedules the IB
  3592. * on the gfx ring for execution by the GPU.
  3593. */
  3594. void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  3595. {
  3596. struct radeon_ring *ring = &rdev->ring[ib->ring];
  3597. unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
  3598. u32 header, control = INDIRECT_BUFFER_VALID;
  3599. if (ib->is_const_ib) {
  3600. /* set switch buffer packet before const IB */
  3601. radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
  3602. radeon_ring_write(ring, 0);
  3603. header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
  3604. } else {
  3605. u32 next_rptr;
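/* next_rptr accounts for the rptr-update packet emitted here (3 dwords
 * via SET_UCONFIG_REG, or 5 dwords via WRITE_DATA) plus the 4 dword
 * INDIRECT_BUFFER packet emitted at the end of this function.
 */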
  3606. if (ring->rptr_save_reg) {
  3607. next_rptr = ring->wptr + 3 + 4;
  3608. radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
  3609. radeon_ring_write(ring, ((ring->rptr_save_reg -
  3610. PACKET3_SET_UCONFIG_REG_START) >> 2));
  3611. radeon_ring_write(ring, next_rptr);
  3612. } else if (rdev->wb.enabled) {
  3613. next_rptr = ring->wptr + 5 + 4;
  3614. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  3615. radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
  3616. radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
  3617. radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
  3618. radeon_ring_write(ring, next_rptr);
  3619. }
  3620. header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
  3621. }
  3622. control |= ib->length_dw | (vm_id << 24);
  3623. radeon_ring_write(ring, header);
  3624. radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
  3625. radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
  3626. radeon_ring_write(ring, control);
  3627. }
  3628. /**
  3629. * cik_ib_test - basic gfx ring IB test
  3630. *
  3631. * @rdev: radeon_device pointer
  3632. * @ring: radeon_ring structure holding ring information
  3633. *
  3634. * Allocate an IB and execute it on the gfx ring (CIK).
  3635. * Provides a basic gfx ring test to verify that IBs are working.
  3636. * Returns 0 on success, error on failure.
  3637. */
  3638. int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
  3639. {
  3640. struct radeon_ib ib;
  3641. uint32_t scratch;
  3642. uint32_t tmp = 0;
  3643. unsigned i;
  3644. int r;
  3645. r = radeon_scratch_get(rdev, &scratch);
  3646. if (r) {
  3647. DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
  3648. return r;
  3649. }
  3650. WREG32(scratch, 0xCAFEDEAD);
  3651. r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
  3652. if (r) {
  3653. DRM_ERROR("radeon: failed to get ib (%d).\n", r);
  3654. radeon_scratch_free(rdev, scratch);
  3655. return r;
  3656. }
  3657. ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
  3658. ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
  3659. ib.ptr[2] = 0xDEADBEEF;
  3660. ib.length_dw = 3;
  3661. r = radeon_ib_schedule(rdev, &ib, NULL, false);
  3662. if (r) {
  3663. radeon_scratch_free(rdev, scratch);
  3664. radeon_ib_free(rdev, &ib);
  3665. DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
  3666. return r;
  3667. }
  3668. r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
  3669. RADEON_USEC_IB_TEST_TIMEOUT));
  3670. if (r < 0) {
  3671. DRM_ERROR("radeon: fence wait failed (%d).\n", r);
  3672. radeon_scratch_free(rdev, scratch);
  3673. radeon_ib_free(rdev, &ib);
  3674. return r;
  3675. } else if (r == 0) {
  3676. DRM_ERROR("radeon: fence wait timed out.\n");
  3677. radeon_scratch_free(rdev, scratch);
  3678. radeon_ib_free(rdev, &ib);
  3679. return -ETIMEDOUT;
  3680. }
  3681. r = 0;
  3682. for (i = 0; i < rdev->usec_timeout; i++) {
  3683. tmp = RREG32(scratch);
  3684. if (tmp == 0xDEADBEEF)
  3685. break;
  3686. DRM_UDELAY(1);
  3687. }
  3688. if (i < rdev->usec_timeout) {
  3689. DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
  3690. } else {
  3691. DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
  3692. scratch, tmp);
  3693. r = -EINVAL;
  3694. }
  3695. radeon_scratch_free(rdev, scratch);
  3696. radeon_ib_free(rdev, &ib);
  3697. return r;
  3698. }
  3699. /*
  3700. * CP.
3701. * On CIK, gfx and compute now have independent command processors.
  3702. *
  3703. * GFX
  3704. * Gfx consists of a single ring and can process both gfx jobs and
  3705. * compute jobs. The gfx CP consists of three microengines (ME):
  3706. * PFP - Pre-Fetch Parser
  3707. * ME - Micro Engine
  3708. * CE - Constant Engine
  3709. * The PFP and ME make up what is considered the Drawing Engine (DE).
3710. * The CE is an asynchronous engine used for updating buffer descriptors
  3711. * used by the DE so that they can be loaded into cache in parallel
  3712. * while the DE is processing state update packets.
  3713. *
  3714. * Compute
  3715. * The compute CP consists of two microengines (ME):
  3716. * MEC1 - Compute MicroEngine 1
  3717. * MEC2 - Compute MicroEngine 2
  3718. * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
  3719. * The queues are exposed to userspace and are programmed directly
  3720. * by the compute runtime.
  3721. */
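/* Illustrative only: the queue count follows directly from the topology
 * above, i.e. total_queues = num_mec * 4 pipes/MEC * 8 queues/pipe,
 * giving 64 queues on a 2 MEC part (KV) and 32 on a 1 MEC part (CI/KB).
 */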
  3722. /**
  3723. * cik_cp_gfx_enable - enable/disable the gfx CP MEs
  3724. *
  3725. * @rdev: radeon_device pointer
  3726. * @enable: enable or disable the MEs
  3727. *
  3728. * Halts or unhalts the gfx MEs.
  3729. */
  3730. static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
  3731. {
  3732. if (enable)
  3733. WREG32(CP_ME_CNTL, 0);
  3734. else {
  3735. if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
  3736. radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
  3737. WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
  3738. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3739. }
  3740. udelay(50);
  3741. }
  3742. /**
  3743. * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
  3744. *
  3745. * @rdev: radeon_device pointer
  3746. *
  3747. * Loads the gfx PFP, ME, and CE ucode.
  3748. * Returns 0 for success, -EINVAL if the ucode is not available.
  3749. */
  3750. static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
  3751. {
  3752. int i;
  3753. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
  3754. return -EINVAL;
  3755. cik_cp_gfx_enable(rdev, false);
  3756. if (rdev->new_fw) {
  3757. const struct gfx_firmware_header_v1_0 *pfp_hdr =
  3758. (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
  3759. const struct gfx_firmware_header_v1_0 *ce_hdr =
  3760. (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
  3761. const struct gfx_firmware_header_v1_0 *me_hdr =
  3762. (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
  3763. const __le32 *fw_data;
  3764. u32 fw_size;
  3765. radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
  3766. radeon_ucode_print_gfx_hdr(&ce_hdr->header);
  3767. radeon_ucode_print_gfx_hdr(&me_hdr->header);
  3768. /* PFP */
  3769. fw_data = (const __le32 *)
  3770. (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
  3771. fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
  3772. WREG32(CP_PFP_UCODE_ADDR, 0);
  3773. for (i = 0; i < fw_size; i++)
  3774. WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
  3775. WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
  3776. /* CE */
  3777. fw_data = (const __le32 *)
  3778. (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
  3779. fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
  3780. WREG32(CP_CE_UCODE_ADDR, 0);
  3781. for (i = 0; i < fw_size; i++)
  3782. WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
  3783. WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
  3784. /* ME */
3785. fw_data = (const __le32 *)
  3786. (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
  3787. fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
  3788. WREG32(CP_ME_RAM_WADDR, 0);
  3789. for (i = 0; i < fw_size; i++)
  3790. WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
  3791. WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
  3792. WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
  3793. } else {
  3794. const __be32 *fw_data;
  3795. /* PFP */
  3796. fw_data = (const __be32 *)rdev->pfp_fw->data;
  3797. WREG32(CP_PFP_UCODE_ADDR, 0);
  3798. for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
  3799. WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
  3800. WREG32(CP_PFP_UCODE_ADDR, 0);
  3801. /* CE */
  3802. fw_data = (const __be32 *)rdev->ce_fw->data;
  3803. WREG32(CP_CE_UCODE_ADDR, 0);
  3804. for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
  3805. WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
  3806. WREG32(CP_CE_UCODE_ADDR, 0);
  3807. /* ME */
  3808. fw_data = (const __be32 *)rdev->me_fw->data;
  3809. WREG32(CP_ME_RAM_WADDR, 0);
  3810. for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
  3811. WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
  3812. WREG32(CP_ME_RAM_WADDR, 0);
  3813. }
  3814. return 0;
  3815. }
  3816. /**
  3817. * cik_cp_gfx_start - start the gfx ring
  3818. *
  3819. * @rdev: radeon_device pointer
  3820. *
  3821. * Enables the ring and loads the clear state context and other
  3822. * packets required to init the ring.
  3823. * Returns 0 for success, error for failure.
  3824. */
  3825. static int cik_cp_gfx_start(struct radeon_device *rdev)
  3826. {
  3827. struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3828. int r, i;
  3829. /* init the CP */
  3830. WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
  3831. WREG32(CP_ENDIAN_SWAP, 0);
  3832. WREG32(CP_DEVICE_ID, 1);
  3833. cik_cp_gfx_enable(rdev, true);
  3834. r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
  3835. if (r) {
  3836. DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
  3837. return r;
  3838. }
  3839. /* init the CE partitions. CE only used for gfx on CIK */
  3840. radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
  3841. radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
  3842. radeon_ring_write(ring, 0x8000);
  3843. radeon_ring_write(ring, 0x8000);
  3844. /* setup clear context state */
  3845. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3846. radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  3847. radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  3848. radeon_ring_write(ring, 0x80000000);
  3849. radeon_ring_write(ring, 0x80000000);
  3850. for (i = 0; i < cik_default_size; i++)
  3851. radeon_ring_write(ring, cik_default_state[i]);
  3852. radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  3853. radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
  3854. /* set clear context state */
  3855. radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
  3856. radeon_ring_write(ring, 0);
  3857. radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  3858. radeon_ring_write(ring, 0x00000316);
  3859. radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
  3860. radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
  3861. radeon_ring_unlock_commit(rdev, ring, false);
  3862. return 0;
  3863. }
  3864. /**
  3865. * cik_cp_gfx_fini - stop the gfx ring
  3866. *
  3867. * @rdev: radeon_device pointer
  3868. *
  3869. * Stop the gfx ring and tear down the driver ring
  3870. * info.
  3871. */
  3872. static void cik_cp_gfx_fini(struct radeon_device *rdev)
  3873. {
  3874. cik_cp_gfx_enable(rdev, false);
  3875. radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3876. }
  3877. /**
  3878. * cik_cp_gfx_resume - setup the gfx ring buffer registers
  3879. *
  3880. * @rdev: radeon_device pointer
  3881. *
  3882. * Program the location and size of the gfx ring buffer
  3883. * and test it to make sure it's working.
  3884. * Returns 0 for success, error for failure.
  3885. */
  3886. static int cik_cp_gfx_resume(struct radeon_device *rdev)
  3887. {
  3888. struct radeon_ring *ring;
  3889. u32 tmp;
  3890. u32 rb_bufsz;
  3891. u64 rb_addr;
  3892. int r;
  3893. WREG32(CP_SEM_WAIT_TIMER, 0x0);
  3894. if (rdev->family != CHIP_HAWAII)
  3895. WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
  3896. /* Set the write pointer delay */
  3897. WREG32(CP_RB_WPTR_DELAY, 0);
  3898. /* set the RB to use vmid 0 */
  3899. WREG32(CP_RB_VMID, 0);
  3900. WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
  3901. /* ring 0 - compute and gfx */
  3902. /* Set ring buffer size */
  3903. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  3904. rb_bufsz = order_base_2(ring->ring_size / 8);
  3905. tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
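/* rb_bufsz is the log2 of the ring size in qwords; the shifted term is
 * assumed here to program the rptr report block size, i.e. how often the
 * CP writes back the read pointer (one GPU page in this case).
 */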
  3906. #ifdef __BIG_ENDIAN
  3907. tmp |= BUF_SWAP_32BIT;
  3908. #endif
  3909. WREG32(CP_RB0_CNTL, tmp);
  3910. /* Initialize the ring buffer's read and write pointers */
  3911. WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
  3912. ring->wptr = 0;
  3913. WREG32(CP_RB0_WPTR, ring->wptr);
3914. /* set the wb address whether it's enabled or not */
  3915. WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
  3916. WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
  3917. /* scratch register shadowing is no longer supported */
  3918. WREG32(SCRATCH_UMSK, 0);
  3919. if (!rdev->wb.enabled)
  3920. tmp |= RB_NO_UPDATE;
  3921. mdelay(1);
  3922. WREG32(CP_RB0_CNTL, tmp);
  3923. rb_addr = ring->gpu_addr >> 8;
  3924. WREG32(CP_RB0_BASE, rb_addr);
  3925. WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
  3926. /* start the ring */
  3927. cik_cp_gfx_start(rdev);
  3928. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
  3929. r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
  3930. if (r) {
  3931. rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
  3932. return r;
  3933. }
  3934. if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
  3935. radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
  3936. return 0;
  3937. }
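/* Ring pointer accessors. The gfx ring reads its rptr from the writeback
 * buffer when available and falls back to the CP_RB0 registers otherwise;
 * the compute rings additionally have to select the right me/pipe/queue
 * via SRBM before touching the HQD registers.
 */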
  3938. u32 cik_gfx_get_rptr(struct radeon_device *rdev,
  3939. struct radeon_ring *ring)
  3940. {
  3941. u32 rptr;
  3942. if (rdev->wb.enabled)
  3943. rptr = rdev->wb.wb[ring->rptr_offs/4];
  3944. else
  3945. rptr = RREG32(CP_RB0_RPTR);
  3946. return rptr;
  3947. }
  3948. u32 cik_gfx_get_wptr(struct radeon_device *rdev,
  3949. struct radeon_ring *ring)
  3950. {
  3951. u32 wptr;
  3952. wptr = RREG32(CP_RB0_WPTR);
  3953. return wptr;
  3954. }
  3955. void cik_gfx_set_wptr(struct radeon_device *rdev,
  3956. struct radeon_ring *ring)
  3957. {
  3958. WREG32(CP_RB0_WPTR, ring->wptr);
  3959. (void)RREG32(CP_RB0_WPTR);
  3960. }
  3961. u32 cik_compute_get_rptr(struct radeon_device *rdev,
  3962. struct radeon_ring *ring)
  3963. {
  3964. u32 rptr;
  3965. if (rdev->wb.enabled) {
  3966. rptr = rdev->wb.wb[ring->rptr_offs/4];
  3967. } else {
  3968. mutex_lock(&rdev->srbm_mutex);
  3969. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3970. rptr = RREG32(CP_HQD_PQ_RPTR);
  3971. cik_srbm_select(rdev, 0, 0, 0, 0);
  3972. mutex_unlock(&rdev->srbm_mutex);
  3973. }
  3974. return rptr;
  3975. }
  3976. u32 cik_compute_get_wptr(struct radeon_device *rdev,
  3977. struct radeon_ring *ring)
  3978. {
  3979. u32 wptr;
  3980. if (rdev->wb.enabled) {
  3981. /* XXX check if swapping is necessary on BE */
  3982. wptr = rdev->wb.wb[ring->wptr_offs/4];
  3983. } else {
  3984. mutex_lock(&rdev->srbm_mutex);
  3985. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  3986. wptr = RREG32(CP_HQD_PQ_WPTR);
  3987. cik_srbm_select(rdev, 0, 0, 0, 0);
  3988. mutex_unlock(&rdev->srbm_mutex);
  3989. }
  3990. return wptr;
  3991. }
  3992. void cik_compute_set_wptr(struct radeon_device *rdev,
  3993. struct radeon_ring *ring)
  3994. {
  3995. /* XXX check if swapping is necessary on BE */
  3996. rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
  3997. WDOORBELL32(ring->doorbell_index, ring->wptr);
  3998. }
  3999. static void cik_compute_stop(struct radeon_device *rdev,
  4000. struct radeon_ring *ring)
  4001. {
  4002. u32 j, tmp;
  4003. cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
  4004. /* Disable wptr polling. */
  4005. tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
  4006. tmp &= ~WPTR_POLL_EN;
  4007. WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
  4008. /* Disable HQD. */
  4009. if (RREG32(CP_HQD_ACTIVE) & 1) {
  4010. WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
  4011. for (j = 0; j < rdev->usec_timeout; j++) {
  4012. if (!(RREG32(CP_HQD_ACTIVE) & 1))
  4013. break;
  4014. udelay(1);
  4015. }
  4016. WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
  4017. WREG32(CP_HQD_PQ_RPTR, 0);
  4018. WREG32(CP_HQD_PQ_WPTR, 0);
  4019. }
  4020. cik_srbm_select(rdev, 0, 0, 0, 0);
  4021. }
  4022. /**
  4023. * cik_cp_compute_enable - enable/disable the compute CP MEs
  4024. *
  4025. * @rdev: radeon_device pointer
  4026. * @enable: enable or disable the MEs
  4027. *
  4028. * Halts or unhalts the compute MEs.
  4029. */
  4030. static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
  4031. {
  4032. if (enable)
  4033. WREG32(CP_MEC_CNTL, 0);
  4034. else {
  4035. /*
  4036. * To make hibernation reliable we need to clear compute ring
  4037. * configuration before halting the compute ring.
  4038. */
  4039. mutex_lock(&rdev->srbm_mutex);
4040. cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
4041. cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
  4042. mutex_unlock(&rdev->srbm_mutex);
  4043. WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
  4044. rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
  4045. rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
  4046. }
  4047. udelay(50);
  4048. }
  4049. /**
  4050. * cik_cp_compute_load_microcode - load the compute CP ME ucode
  4051. *
  4052. * @rdev: radeon_device pointer
  4053. *
  4054. * Loads the compute MEC1&2 ucode.
  4055. * Returns 0 for success, -EINVAL if the ucode is not available.
  4056. */
  4057. static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
  4058. {
  4059. int i;
  4060. if (!rdev->mec_fw)
  4061. return -EINVAL;
  4062. cik_cp_compute_enable(rdev, false);
  4063. if (rdev->new_fw) {
  4064. const struct gfx_firmware_header_v1_0 *mec_hdr =
  4065. (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
  4066. const __le32 *fw_data;
  4067. u32 fw_size;
  4068. radeon_ucode_print_gfx_hdr(&mec_hdr->header);
  4069. /* MEC1 */
  4070. fw_data = (const __le32 *)
  4071. (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
  4072. fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
  4073. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  4074. for (i = 0; i < fw_size; i++)
  4075. WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
  4076. WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
  4077. /* MEC2 */
  4078. if (rdev->family == CHIP_KAVERI) {
  4079. const struct gfx_firmware_header_v1_0 *mec2_hdr =
  4080. (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
  4081. fw_data = (const __le32 *)
  4082. (rdev->mec2_fw->data +
  4083. le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
  4084. fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
  4085. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  4086. for (i = 0; i < fw_size; i++)
  4087. WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
  4088. WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
  4089. }
  4090. } else {
  4091. const __be32 *fw_data;
  4092. /* MEC1 */
  4093. fw_data = (const __be32 *)rdev->mec_fw->data;
  4094. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  4095. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  4096. WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
  4097. WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
  4098. if (rdev->family == CHIP_KAVERI) {
  4099. /* MEC2 */
  4100. fw_data = (const __be32 *)rdev->mec_fw->data;
  4101. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  4102. for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
  4103. WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
  4104. WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
  4105. }
  4106. }
  4107. return 0;
  4108. }
  4109. /**
  4110. * cik_cp_compute_start - start the compute queues
  4111. *
  4112. * @rdev: radeon_device pointer
  4113. *
  4114. * Enable the compute queues.
  4115. * Returns 0 for success, error for failure.
  4116. */
  4117. static int cik_cp_compute_start(struct radeon_device *rdev)
  4118. {
  4119. cik_cp_compute_enable(rdev, true);
  4120. return 0;
  4121. }
  4122. /**
  4123. * cik_cp_compute_fini - stop the compute queues
  4124. *
  4125. * @rdev: radeon_device pointer
  4126. *
  4127. * Stop the compute queues and tear down the driver queue
  4128. * info.
  4129. */
  4130. static void cik_cp_compute_fini(struct radeon_device *rdev)
  4131. {
  4132. int i, idx, r;
  4133. cik_cp_compute_enable(rdev, false);
  4134. for (i = 0; i < 2; i++) {
  4135. if (i == 0)
  4136. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  4137. else
  4138. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  4139. if (rdev->ring[idx].mqd_obj) {
  4140. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  4141. if (unlikely(r != 0))
  4142. dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
  4143. radeon_bo_unpin(rdev->ring[idx].mqd_obj);
  4144. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  4145. radeon_bo_unref(&rdev->ring[idx].mqd_obj);
  4146. rdev->ring[idx].mqd_obj = NULL;
  4147. }
  4148. }
  4149. }
  4150. static void cik_mec_fini(struct radeon_device *rdev)
  4151. {
  4152. int r;
  4153. if (rdev->mec.hpd_eop_obj) {
  4154. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  4155. if (unlikely(r != 0))
  4156. dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
  4157. radeon_bo_unpin(rdev->mec.hpd_eop_obj);
  4158. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  4159. radeon_bo_unref(&rdev->mec.hpd_eop_obj);
  4160. rdev->mec.hpd_eop_obj = NULL;
  4161. }
  4162. }
  4163. #define MEC_HPD_SIZE 2048
  4164. static int cik_mec_init(struct radeon_device *rdev)
  4165. {
  4166. int r;
  4167. u32 *hpd;
  4168. /*
  4169. * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
  4170. * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
  4171. * Nonetheless, we assign only 1 pipe because all other pipes will
  4172. * be handled by KFD
  4173. */
  4174. rdev->mec.num_mec = 1;
  4175. rdev->mec.num_pipe = 1;
  4176. rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
  4177. if (rdev->mec.hpd_eop_obj == NULL) {
  4178. r = radeon_bo_create(rdev,
4179. rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
  4180. PAGE_SIZE, true,
  4181. RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
  4182. &rdev->mec.hpd_eop_obj);
  4183. if (r) {
  4184. dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
  4185. return r;
  4186. }
  4187. }
  4188. r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
  4189. if (unlikely(r != 0)) {
  4190. cik_mec_fini(rdev);
  4191. return r;
  4192. }
  4193. r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
  4194. &rdev->mec.hpd_eop_gpu_addr);
  4195. if (r) {
  4196. dev_warn(rdev->dev, "(%d) pin HDP EOP bo failed\n", r);
  4197. cik_mec_fini(rdev);
  4198. return r;
  4199. }
  4200. r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
  4201. if (r) {
  4202. dev_warn(rdev->dev, "(%d) map HDP EOP bo failed\n", r);
  4203. cik_mec_fini(rdev);
  4204. return r;
  4205. }
  4206. /* clear memory. Not sure if this is required or not */
4207. memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
  4208. radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
  4209. radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
  4210. return 0;
  4211. }
  4212. struct hqd_registers
  4213. {
  4214. u32 cp_mqd_base_addr;
  4215. u32 cp_mqd_base_addr_hi;
  4216. u32 cp_hqd_active;
  4217. u32 cp_hqd_vmid;
  4218. u32 cp_hqd_persistent_state;
  4219. u32 cp_hqd_pipe_priority;
  4220. u32 cp_hqd_queue_priority;
  4221. u32 cp_hqd_quantum;
  4222. u32 cp_hqd_pq_base;
  4223. u32 cp_hqd_pq_base_hi;
  4224. u32 cp_hqd_pq_rptr;
  4225. u32 cp_hqd_pq_rptr_report_addr;
  4226. u32 cp_hqd_pq_rptr_report_addr_hi;
  4227. u32 cp_hqd_pq_wptr_poll_addr;
  4228. u32 cp_hqd_pq_wptr_poll_addr_hi;
  4229. u32 cp_hqd_pq_doorbell_control;
  4230. u32 cp_hqd_pq_wptr;
  4231. u32 cp_hqd_pq_control;
  4232. u32 cp_hqd_ib_base_addr;
  4233. u32 cp_hqd_ib_base_addr_hi;
  4234. u32 cp_hqd_ib_rptr;
  4235. u32 cp_hqd_ib_control;
  4236. u32 cp_hqd_iq_timer;
  4237. u32 cp_hqd_iq_rptr;
  4238. u32 cp_hqd_dequeue_request;
  4239. u32 cp_hqd_dma_offload;
  4240. u32 cp_hqd_sema_cmd;
  4241. u32 cp_hqd_msg_type;
  4242. u32 cp_hqd_atomic0_preop_lo;
  4243. u32 cp_hqd_atomic0_preop_hi;
  4244. u32 cp_hqd_atomic1_preop_lo;
  4245. u32 cp_hqd_atomic1_preop_hi;
  4246. u32 cp_hqd_hq_scheduler0;
  4247. u32 cp_hqd_hq_scheduler1;
  4248. u32 cp_mqd_control;
  4249. };
  4250. struct bonaire_mqd
  4251. {
  4252. u32 header;
  4253. u32 dispatch_initiator;
  4254. u32 dimensions[3];
  4255. u32 start_idx[3];
  4256. u32 num_threads[3];
  4257. u32 pipeline_stat_enable;
  4258. u32 perf_counter_enable;
  4259. u32 pgm[2];
  4260. u32 tba[2];
  4261. u32 tma[2];
  4262. u32 pgm_rsrc[2];
  4263. u32 vmid;
  4264. u32 resource_limits;
  4265. u32 static_thread_mgmt01[2];
  4266. u32 tmp_ring_size;
  4267. u32 static_thread_mgmt23[2];
  4268. u32 restart[3];
  4269. u32 thread_trace_enable;
  4270. u32 reserved1;
  4271. u32 user_data[16];
  4272. u32 vgtcs_invoke_count[2];
  4273. struct hqd_registers queue_state;
  4274. u32 dequeue_cntr;
  4275. u32 interrupt_queue[64];
  4276. };
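/* The bonaire_mqd above is the memory queue descriptor: a system memory
 * copy of one compute queue's HQD state. cik_cp_compute_resume() below
 * fills it in and points the CP at it via CP_MQD_BASE_ADDR.
 */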
  4277. /**
  4278. * cik_cp_compute_resume - setup the compute queue registers
  4279. *
  4280. * @rdev: radeon_device pointer
  4281. *
  4282. * Program the compute queues and test them to make sure they
  4283. * are working.
  4284. * Returns 0 for success, error for failure.
  4285. */
  4286. static int cik_cp_compute_resume(struct radeon_device *rdev)
  4287. {
  4288. int r, i, j, idx;
  4289. u32 tmp;
  4290. bool use_doorbell = true;
  4291. u64 hqd_gpu_addr;
  4292. u64 mqd_gpu_addr;
  4293. u64 eop_gpu_addr;
  4294. u64 wb_gpu_addr;
  4295. u32 *buf;
  4296. struct bonaire_mqd *mqd;
  4297. r = cik_cp_compute_start(rdev);
  4298. if (r)
  4299. return r;
  4300. /* fix up chicken bits */
  4301. tmp = RREG32(CP_CPF_DEBUG);
  4302. tmp |= (1 << 23);
  4303. WREG32(CP_CPF_DEBUG, tmp);
  4304. /* init the pipes */
  4305. mutex_lock(&rdev->srbm_mutex);
  4306. eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
  4307. cik_srbm_select(rdev, 0, 0, 0, 0);
  4308. /* write the EOP addr */
  4309. WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
  4310. WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
  4311. /* set the VMID assigned */
  4312. WREG32(CP_HPD_EOP_VMID, 0);
  4313. /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
  4314. tmp = RREG32(CP_HPD_EOP_CONTROL);
  4315. tmp &= ~EOP_SIZE_MASK;
  4316. tmp |= order_base_2(MEC_HPD_SIZE / 8);
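/* e.g. MEC_HPD_SIZE = 2048 bytes: 2048 / 8 = 256, log2 = 8, and the
 * register encoding of 2^(8+1) dwords = 512 dwords = 2048 bytes again.
 */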
  4317. WREG32(CP_HPD_EOP_CONTROL, tmp);
  4318. mutex_unlock(&rdev->srbm_mutex);
  4319. /* init the queues. Just two for now. */
  4320. for (i = 0; i < 2; i++) {
  4321. if (i == 0)
  4322. idx = CAYMAN_RING_TYPE_CP1_INDEX;
  4323. else
  4324. idx = CAYMAN_RING_TYPE_CP2_INDEX;
  4325. if (rdev->ring[idx].mqd_obj == NULL) {
  4326. r = radeon_bo_create(rdev,
  4327. sizeof(struct bonaire_mqd),
  4328. PAGE_SIZE, true,
  4329. RADEON_GEM_DOMAIN_GTT, 0, NULL,
  4330. NULL, &rdev->ring[idx].mqd_obj);
  4331. if (r) {
  4332. dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
  4333. return r;
  4334. }
  4335. }
  4336. r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
  4337. if (unlikely(r != 0)) {
  4338. cik_cp_compute_fini(rdev);
  4339. return r;
  4340. }
  4341. r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
  4342. &mqd_gpu_addr);
  4343. if (r) {
  4344. dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
  4345. cik_cp_compute_fini(rdev);
  4346. return r;
  4347. }
  4348. r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
  4349. if (r) {
  4350. dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
  4351. cik_cp_compute_fini(rdev);
  4352. return r;
  4353. }
  4354. /* init the mqd struct */
  4355. memset(buf, 0, sizeof(struct bonaire_mqd));
  4356. mqd = (struct bonaire_mqd *)buf;
  4357. mqd->header = 0xC0310800;
  4358. mqd->static_thread_mgmt01[0] = 0xffffffff;
  4359. mqd->static_thread_mgmt01[1] = 0xffffffff;
  4360. mqd->static_thread_mgmt23[0] = 0xffffffff;
  4361. mqd->static_thread_mgmt23[1] = 0xffffffff;
  4362. mutex_lock(&rdev->srbm_mutex);
  4363. cik_srbm_select(rdev, rdev->ring[idx].me,
  4364. rdev->ring[idx].pipe,
  4365. rdev->ring[idx].queue, 0);
  4366. /* disable wptr polling */
  4367. tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
  4368. tmp &= ~WPTR_POLL_EN;
  4369. WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
  4370. /* enable doorbell? */
  4371. mqd->queue_state.cp_hqd_pq_doorbell_control =
  4372. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  4373. if (use_doorbell)
  4374. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  4375. else
  4376. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
  4377. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  4378. mqd->queue_state.cp_hqd_pq_doorbell_control);
  4379. /* disable the queue if it's active */
  4380. mqd->queue_state.cp_hqd_dequeue_request = 0;
  4381. mqd->queue_state.cp_hqd_pq_rptr = 0;
4382. mqd->queue_state.cp_hqd_pq_wptr = 0;
  4383. if (RREG32(CP_HQD_ACTIVE) & 1) {
  4384. WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
  4385. for (j = 0; j < rdev->usec_timeout; j++) {
  4386. if (!(RREG32(CP_HQD_ACTIVE) & 1))
  4387. break;
  4388. udelay(1);
  4389. }
  4390. WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
  4391. WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
  4392. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  4393. }
  4394. /* set the pointer to the MQD */
  4395. mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
  4396. mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
  4397. WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
  4398. WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
  4399. /* set MQD vmid to 0 */
  4400. mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
  4401. mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
  4402. WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
4403. /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
  4404. hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
  4405. mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
  4406. mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
  4407. WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
  4408. WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
  4409. /* set up the HQD, this is similar to CP_RB0_CNTL */
  4410. mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
  4411. mqd->queue_state.cp_hqd_pq_control &=
  4412. ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
  4413. mqd->queue_state.cp_hqd_pq_control |=
  4414. order_base_2(rdev->ring[idx].ring_size / 8);
  4415. mqd->queue_state.cp_hqd_pq_control |=
  4416. (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
  4417. #ifdef __BIG_ENDIAN
  4418. mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
  4419. #endif
  4420. mqd->queue_state.cp_hqd_pq_control &=
  4421. ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
  4422. mqd->queue_state.cp_hqd_pq_control |=
  4423. PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
  4424. WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
  4425. /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
  4426. if (i == 0)
  4427. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
  4428. else
  4429. wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
  4430. mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
  4431. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
  4432. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
  4433. WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
  4434. mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
4435. /* set the wb address whether it's enabled or not */
  4436. if (i == 0)
  4437. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
  4438. else
  4439. wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
  4440. mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
  4441. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
  4442. upper_32_bits(wb_gpu_addr) & 0xffff;
  4443. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
  4444. mqd->queue_state.cp_hqd_pq_rptr_report_addr);
  4445. WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
  4446. mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
  4447. /* enable the doorbell if requested */
  4448. if (use_doorbell) {
  4449. mqd->queue_state.cp_hqd_pq_doorbell_control =
  4450. RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
  4451. mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
  4452. mqd->queue_state.cp_hqd_pq_doorbell_control |=
  4453. DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
  4454. mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
  4455. mqd->queue_state.cp_hqd_pq_doorbell_control &=
  4456. ~(DOORBELL_SOURCE | DOORBELL_HIT);
  4457. } else {
  4458. mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
  4459. }
  4460. WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
  4461. mqd->queue_state.cp_hqd_pq_doorbell_control);
  4462. /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
  4463. rdev->ring[idx].wptr = 0;
  4464. mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
  4465. WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
  4466. mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
  4467. /* set the vmid for the queue */
  4468. mqd->queue_state.cp_hqd_vmid = 0;
  4469. WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
  4470. /* activate the queue */
  4471. mqd->queue_state.cp_hqd_active = 1;
  4472. WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
  4473. cik_srbm_select(rdev, 0, 0, 0, 0);
  4474. mutex_unlock(&rdev->srbm_mutex);
  4475. radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
  4476. radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
  4477. rdev->ring[idx].ready = true;
  4478. r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
  4479. if (r)
  4480. rdev->ring[idx].ready = false;
  4481. }
  4482. return 0;
  4483. }
  4484. static void cik_cp_enable(struct radeon_device *rdev, bool enable)
  4485. {
  4486. cik_cp_gfx_enable(rdev, enable);
  4487. cik_cp_compute_enable(rdev, enable);
  4488. }
  4489. static int cik_cp_load_microcode(struct radeon_device *rdev)
  4490. {
  4491. int r;
  4492. r = cik_cp_gfx_load_microcode(rdev);
  4493. if (r)
  4494. return r;
  4495. r = cik_cp_compute_load_microcode(rdev);
  4496. if (r)
  4497. return r;
  4498. return 0;
  4499. }
  4500. static void cik_cp_fini(struct radeon_device *rdev)
  4501. {
  4502. cik_cp_gfx_fini(rdev);
  4503. cik_cp_compute_fini(rdev);
  4504. }
  4505. static int cik_cp_resume(struct radeon_device *rdev)
  4506. {
  4507. int r;
  4508. cik_enable_gui_idle_interrupt(rdev, false);
  4509. r = cik_cp_load_microcode(rdev);
  4510. if (r)
  4511. return r;
  4512. r = cik_cp_gfx_resume(rdev);
  4513. if (r)
  4514. return r;
  4515. r = cik_cp_compute_resume(rdev);
  4516. if (r)
  4517. return r;
  4518. cik_enable_gui_idle_interrupt(rdev, true);
  4519. return 0;
  4520. }
  4521. static void cik_print_gpu_status_regs(struct radeon_device *rdev)
  4522. {
  4523. dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
  4524. RREG32(GRBM_STATUS));
  4525. dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
  4526. RREG32(GRBM_STATUS2));
  4527. dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
  4528. RREG32(GRBM_STATUS_SE0));
  4529. dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
  4530. RREG32(GRBM_STATUS_SE1));
  4531. dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
  4532. RREG32(GRBM_STATUS_SE2));
  4533. dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
  4534. RREG32(GRBM_STATUS_SE3));
  4535. dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
  4536. RREG32(SRBM_STATUS));
  4537. dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
  4538. RREG32(SRBM_STATUS2));
  4539. dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
  4540. RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
  4541. dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
  4542. RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
  4543. dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
  4544. dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
  4545. RREG32(CP_STALLED_STAT1));
  4546. dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
  4547. RREG32(CP_STALLED_STAT2));
  4548. dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
  4549. RREG32(CP_STALLED_STAT3));
  4550. dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
  4551. RREG32(CP_CPF_BUSY_STAT));
  4552. dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
  4553. RREG32(CP_CPF_STALLED_STAT1));
  4554. dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
  4555. dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
  4556. dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
  4557. RREG32(CP_CPC_STALLED_STAT1));
  4558. dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
  4559. }
  4560. /**
  4561. * cik_gpu_check_soft_reset - check which blocks are busy
  4562. *
  4563. * @rdev: radeon_device pointer
  4564. *
  4565. * Check which blocks are busy and return the relevant reset
  4566. * mask to be used by cik_gpu_soft_reset().
  4567. * Returns a mask of the blocks to be reset.
  4568. */
  4569. u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
  4570. {
  4571. u32 reset_mask = 0;
  4572. u32 tmp;
  4573. /* GRBM_STATUS */
  4574. tmp = RREG32(GRBM_STATUS);
  4575. if (tmp & (PA_BUSY | SC_BUSY |
  4576. BCI_BUSY | SX_BUSY |
  4577. TA_BUSY | VGT_BUSY |
  4578. DB_BUSY | CB_BUSY |
  4579. GDS_BUSY | SPI_BUSY |
  4580. IA_BUSY | IA_BUSY_NO_DMA))
  4581. reset_mask |= RADEON_RESET_GFX;
  4582. if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
  4583. reset_mask |= RADEON_RESET_CP;
  4584. /* GRBM_STATUS2 */
  4585. tmp = RREG32(GRBM_STATUS2);
  4586. if (tmp & RLC_BUSY)
  4587. reset_mask |= RADEON_RESET_RLC;
  4588. /* SDMA0_STATUS_REG */
  4589. tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
  4590. if (!(tmp & SDMA_IDLE))
  4591. reset_mask |= RADEON_RESET_DMA;
  4592. /* SDMA1_STATUS_REG */
  4593. tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
  4594. if (!(tmp & SDMA_IDLE))
  4595. reset_mask |= RADEON_RESET_DMA1;
  4596. /* SRBM_STATUS2 */
  4597. tmp = RREG32(SRBM_STATUS2);
  4598. if (tmp & SDMA_BUSY)
  4599. reset_mask |= RADEON_RESET_DMA;
  4600. if (tmp & SDMA1_BUSY)
  4601. reset_mask |= RADEON_RESET_DMA1;
  4602. /* SRBM_STATUS */
  4603. tmp = RREG32(SRBM_STATUS);
  4604. if (tmp & IH_BUSY)
  4605. reset_mask |= RADEON_RESET_IH;
  4606. if (tmp & SEM_BUSY)
  4607. reset_mask |= RADEON_RESET_SEM;
  4608. if (tmp & GRBM_RQ_PENDING)
  4609. reset_mask |= RADEON_RESET_GRBM;
  4610. if (tmp & VMC_BUSY)
  4611. reset_mask |= RADEON_RESET_VMC;
  4612. if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
  4613. MCC_BUSY | MCD_BUSY))
  4614. reset_mask |= RADEON_RESET_MC;
  4615. if (evergreen_is_display_hung(rdev))
  4616. reset_mask |= RADEON_RESET_DISPLAY;
4617. /* Skip MC reset as it's most likely not hung, just busy */
  4618. if (reset_mask & RADEON_RESET_MC) {
  4619. DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
  4620. reset_mask &= ~RADEON_RESET_MC;
  4621. }
  4622. return reset_mask;
  4623. }
  4624. /**
  4625. * cik_gpu_soft_reset - soft reset GPU
  4626. *
  4627. * @rdev: radeon_device pointer
  4628. * @reset_mask: mask of which blocks to reset
  4629. *
  4630. * Soft reset the blocks specified in @reset_mask.
  4631. */
  4632. static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
  4633. {
  4634. struct evergreen_mc_save save;
  4635. u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
  4636. u32 tmp;
  4637. if (reset_mask == 0)
  4638. return;
  4639. dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
  4640. cik_print_gpu_status_regs(rdev);
  4641. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  4642. RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
  4643. dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  4644. RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
  4645. /* disable CG/PG */
  4646. cik_fini_pg(rdev);
  4647. cik_fini_cg(rdev);
  4648. /* stop the rlc */
  4649. cik_rlc_stop(rdev);
  4650. /* Disable GFX parsing/prefetching */
  4651. WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
  4652. /* Disable MEC parsing/prefetching */
  4653. WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
  4654. if (reset_mask & RADEON_RESET_DMA) {
  4655. /* sdma0 */
  4656. tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
  4657. tmp |= SDMA_HALT;
  4658. WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  4659. }
  4660. if (reset_mask & RADEON_RESET_DMA1) {
  4661. /* sdma1 */
  4662. tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
  4663. tmp |= SDMA_HALT;
  4664. WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  4665. }
  4666. evergreen_mc_stop(rdev, &save);
  4667. if (evergreen_mc_wait_for_idle(rdev)) {
  4668. dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
  4669. }
  4670. if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
  4671. grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
  4672. if (reset_mask & RADEON_RESET_CP) {
  4673. grbm_soft_reset |= SOFT_RESET_CP;
  4674. srbm_soft_reset |= SOFT_RESET_GRBM;
  4675. }
  4676. if (reset_mask & RADEON_RESET_DMA)
  4677. srbm_soft_reset |= SOFT_RESET_SDMA;
  4678. if (reset_mask & RADEON_RESET_DMA1)
  4679. srbm_soft_reset |= SOFT_RESET_SDMA1;
  4680. if (reset_mask & RADEON_RESET_DISPLAY)
  4681. srbm_soft_reset |= SOFT_RESET_DC;
  4682. if (reset_mask & RADEON_RESET_RLC)
  4683. grbm_soft_reset |= SOFT_RESET_RLC;
  4684. if (reset_mask & RADEON_RESET_SEM)
  4685. srbm_soft_reset |= SOFT_RESET_SEM;
  4686. if (reset_mask & RADEON_RESET_IH)
  4687. srbm_soft_reset |= SOFT_RESET_IH;
  4688. if (reset_mask & RADEON_RESET_GRBM)
  4689. srbm_soft_reset |= SOFT_RESET_GRBM;
  4690. if (reset_mask & RADEON_RESET_VMC)
  4691. srbm_soft_reset |= SOFT_RESET_VMC;
  4692. if (!(rdev->flags & RADEON_IS_IGP)) {
  4693. if (reset_mask & RADEON_RESET_MC)
  4694. srbm_soft_reset |= SOFT_RESET_MC;
  4695. }
  4696. if (grbm_soft_reset) {
  4697. tmp = RREG32(GRBM_SOFT_RESET);
  4698. tmp |= grbm_soft_reset;
  4699. dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
  4700. WREG32(GRBM_SOFT_RESET, tmp);
  4701. tmp = RREG32(GRBM_SOFT_RESET);
  4702. udelay(50);
  4703. tmp &= ~grbm_soft_reset;
  4704. WREG32(GRBM_SOFT_RESET, tmp);
  4705. tmp = RREG32(GRBM_SOFT_RESET);
  4706. }
  4707. if (srbm_soft_reset) {
  4708. tmp = RREG32(SRBM_SOFT_RESET);
  4709. tmp |= srbm_soft_reset;
  4710. dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
  4711. WREG32(SRBM_SOFT_RESET, tmp);
  4712. tmp = RREG32(SRBM_SOFT_RESET);
  4713. udelay(50);
  4714. tmp &= ~srbm_soft_reset;
  4715. WREG32(SRBM_SOFT_RESET, tmp);
  4716. tmp = RREG32(SRBM_SOFT_RESET);
  4717. }
  4718. /* Wait a little for things to settle down */
  4719. udelay(50);
  4720. evergreen_mc_resume(rdev, &save);
  4721. udelay(50);
  4722. cik_print_gpu_status_regs(rdev);
  4723. }
  4724. struct kv_reset_save_regs {
  4725. u32 gmcon_reng_execute;
  4726. u32 gmcon_misc;
  4727. u32 gmcon_misc3;
  4728. };
  4729. static void kv_save_regs_for_reset(struct radeon_device *rdev,
  4730. struct kv_reset_save_regs *save)
  4731. {
  4732. save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
  4733. save->gmcon_misc = RREG32(GMCON_MISC);
  4734. save->gmcon_misc3 = RREG32(GMCON_MISC3);
  4735. WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
  4736. WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
  4737. STCTRL_STUTTER_EN));
  4738. }
  4739. static void kv_restore_regs_for_reset(struct radeon_device *rdev,
  4740. struct kv_reset_save_regs *save)
  4741. {
  4742. int i;
  4743. WREG32(GMCON_PGFSM_WRITE, 0);
  4744. WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
  4745. for (i = 0; i < 5; i++)
  4746. WREG32(GMCON_PGFSM_WRITE, 0);
  4747. WREG32(GMCON_PGFSM_WRITE, 0);
  4748. WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
  4749. for (i = 0; i < 5; i++)
  4750. WREG32(GMCON_PGFSM_WRITE, 0);
  4751. WREG32(GMCON_PGFSM_WRITE, 0x210000);
  4752. WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
  4753. for (i = 0; i < 5; i++)
  4754. WREG32(GMCON_PGFSM_WRITE, 0);
  4755. WREG32(GMCON_PGFSM_WRITE, 0x21003);
  4756. WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
  4757. for (i = 0; i < 5; i++)
  4758. WREG32(GMCON_PGFSM_WRITE, 0);
  4759. WREG32(GMCON_PGFSM_WRITE, 0x2b00);
  4760. WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
  4761. for (i = 0; i < 5; i++)
  4762. WREG32(GMCON_PGFSM_WRITE, 0);
  4763. WREG32(GMCON_PGFSM_WRITE, 0);
  4764. WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
  4765. for (i = 0; i < 5; i++)
  4766. WREG32(GMCON_PGFSM_WRITE, 0);
  4767. WREG32(GMCON_PGFSM_WRITE, 0x420000);
  4768. WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
  4769. for (i = 0; i < 5; i++)
  4770. WREG32(GMCON_PGFSM_WRITE, 0);
  4771. WREG32(GMCON_PGFSM_WRITE, 0x120202);
  4772. WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
  4773. for (i = 0; i < 5; i++)
  4774. WREG32(GMCON_PGFSM_WRITE, 0);
  4775. WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
  4776. WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
  4777. for (i = 0; i < 5; i++)
  4778. WREG32(GMCON_PGFSM_WRITE, 0);
  4779. WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
  4780. WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
  4781. for (i = 0; i < 5; i++)
  4782. WREG32(GMCON_PGFSM_WRITE, 0);
  4783. WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
  4784. WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
  4785. WREG32(GMCON_MISC3, save->gmcon_misc3);
  4786. WREG32(GMCON_MISC, save->gmcon_misc);
  4787. WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
  4788. }
  4789. static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
  4790. {
  4791. struct evergreen_mc_save save;
  4792. struct kv_reset_save_regs kv_save = { 0 };
  4793. u32 tmp, i;
  4794. dev_info(rdev->dev, "GPU pci config reset\n");
  4795. /* disable dpm? */
  4796. /* disable cg/pg */
  4797. cik_fini_pg(rdev);
  4798. cik_fini_cg(rdev);
  4799. /* Disable GFX parsing/prefetching */
  4800. WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
  4801. /* Disable MEC parsing/prefetching */
  4802. WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
  4803. /* sdma0 */
  4804. tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
  4805. tmp |= SDMA_HALT;
  4806. WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  4807. /* sdma1 */
  4808. tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
  4809. tmp |= SDMA_HALT;
  4810. WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  4811. /* XXX other engines? */
  4812. /* halt the rlc, disable cp internal ints */
  4813. cik_rlc_stop(rdev);
  4814. udelay(50);
  4815. /* disable mem access */
  4816. evergreen_mc_stop(rdev, &save);
  4817. if (evergreen_mc_wait_for_idle(rdev)) {
  4818. dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
  4819. }
  4820. if (rdev->flags & RADEON_IS_IGP)
  4821. kv_save_regs_for_reset(rdev, &kv_save);
  4822. /* disable BM */
  4823. pci_clear_master(rdev->pdev);
  4824. /* reset */
  4825. radeon_pci_config_reset(rdev);
  4826. udelay(100);
  4827. /* wait for asic to come out of reset */
  4828. for (i = 0; i < rdev->usec_timeout; i++) {
  4829. if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
  4830. break;
  4831. udelay(1);
  4832. }
  4833. /* does asic init need to be run first??? */
  4834. if (rdev->flags & RADEON_IS_IGP)
  4835. kv_restore_regs_for_reset(rdev, &kv_save);
  4836. }
  4837. /**
  4838. * cik_asic_reset - soft reset GPU
  4839. *
  4840. * @rdev: radeon_device pointer
  4841. *
  4842. * Look up which blocks are hung and attempt
  4843. * to reset them.
  4844. * Returns 0 for success.
  4845. */
  4846. int cik_asic_reset(struct radeon_device *rdev)
  4847. {
  4848. u32 reset_mask;
  4849. reset_mask = cik_gpu_check_soft_reset(rdev);
  4850. if (reset_mask)
  4851. r600_set_bios_scratch_engine_hung(rdev, true);
  4852. /* try soft reset */
  4853. cik_gpu_soft_reset(rdev, reset_mask);
  4854. reset_mask = cik_gpu_check_soft_reset(rdev);
  4855. /* try pci config reset */
  4856. if (reset_mask && radeon_hard_reset)
  4857. cik_gpu_pci_config_reset(rdev);
  4858. reset_mask = cik_gpu_check_soft_reset(rdev);
  4859. if (!reset_mask)
  4860. r600_set_bios_scratch_engine_hung(rdev, false);
  4861. return 0;
  4862. }
  4863. /**
  4864. * cik_gfx_is_lockup - check if the 3D engine is locked up
  4865. *
  4866. * @rdev: radeon_device pointer
  4867. * @ring: radeon_ring structure holding ring information
  4868. *
  4869. * Check if the 3D engine is locked up (CIK).
  4870. * Returns true if the engine is locked, false if not.
  4871. */
  4872. bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  4873. {
  4874. u32 reset_mask = cik_gpu_check_soft_reset(rdev);
  4875. if (!(reset_mask & (RADEON_RESET_GFX |
  4876. RADEON_RESET_COMPUTE |
  4877. RADEON_RESET_CP))) {
  4878. radeon_ring_lockup_update(rdev, ring);
  4879. return false;
  4880. }
  4881. return radeon_ring_test_lockup(rdev, ring);
  4882. }
  4883. /* MC */
  4884. /**
  4885. * cik_mc_program - program the GPU memory controller
  4886. *
  4887. * @rdev: radeon_device pointer
  4888. *
  4889. * Set the location of vram, gart, and AGP in the GPU's
  4890. * physical address space (CIK).
  4891. */
  4892. static void cik_mc_program(struct radeon_device *rdev)
  4893. {
  4894. struct evergreen_mc_save save;
  4895. u32 tmp;
  4896. int i, j;
  4897. /* Initialize HDP */
  4898. for (i = 0, j = 0; i < 32; i++, j += 0x18) {
  4899. WREG32((0x2c14 + j), 0x00000000);
  4900. WREG32((0x2c18 + j), 0x00000000);
  4901. WREG32((0x2c1c + j), 0x00000000);
  4902. WREG32((0x2c20 + j), 0x00000000);
  4903. WREG32((0x2c24 + j), 0x00000000);
  4904. }
  4905. WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
  4906. evergreen_mc_stop(rdev, &save);
  4907. if (radeon_mc_wait_for_idle(rdev)) {
4908. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4909. }
  4910. /* Lockout access through VGA aperture*/
  4911. WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
  4912. /* Update configuration */
  4913. WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
  4914. rdev->mc.vram_start >> 12);
  4915. WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
  4916. rdev->mc.vram_end >> 12);
  4917. WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
  4918. rdev->vram_scratch.gpu_addr >> 12);
  4919. tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
  4920. tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
  4921. WREG32(MC_VM_FB_LOCATION, tmp);
  4922. /* XXX double check these! */
  4923. WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
  4924. WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
  4925. WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
  4926. WREG32(MC_VM_AGP_BASE, 0);
  4927. WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
  4928. WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
  4929. if (radeon_mc_wait_for_idle(rdev)) {
4930. dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
  4931. }
  4932. evergreen_mc_resume(rdev, &save);
  4933. /* we need to own VRAM, so turn off the VGA renderer here
  4934. * to stop it overwriting our objects */
  4935. rv515_vga_render_disable(rdev);
  4936. }
  4937. /**
  4938. * cik_mc_init - initialize the memory controller driver params
  4939. *
  4940. * @rdev: radeon_device pointer
  4941. *
  4942. * Look up the amount of vram, vram width, and decide how to place
  4943. * vram and gart within the GPU's physical address space (CIK).
  4944. * Returns 0 for success.
  4945. */
  4946. static int cik_mc_init(struct radeon_device *rdev)
  4947. {
  4948. u32 tmp;
  4949. int chansize, numchan;
4950. /* Get VRAM information */
  4951. rdev->mc.vram_is_ddr = true;
  4952. tmp = RREG32(MC_ARB_RAMCFG);
  4953. if (tmp & CHANSIZE_MASK) {
  4954. chansize = 64;
  4955. } else {
  4956. chansize = 32;
  4957. }
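/* NOOFCHAN is an encoding, not a raw count:
 * 0=1, 1=2, 2=4, 3=8, 4=3, 5=6, 6=10, 7=12, 8=16 channels
 */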
  4958. tmp = RREG32(MC_SHARED_CHMAP);
  4959. switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
  4960. case 0:
  4961. default:
  4962. numchan = 1;
  4963. break;
  4964. case 1:
  4965. numchan = 2;
  4966. break;
  4967. case 2:
  4968. numchan = 4;
  4969. break;
  4970. case 3:
  4971. numchan = 8;
  4972. break;
  4973. case 4:
  4974. numchan = 3;
  4975. break;
  4976. case 5:
  4977. numchan = 6;
  4978. break;
  4979. case 6:
  4980. numchan = 10;
  4981. break;
  4982. case 7:
  4983. numchan = 12;
  4984. break;
  4985. case 8:
  4986. numchan = 16;
  4987. break;
  4988. }
  4989. rdev->mc.vram_width = numchan * chansize;
  4990. /* Could aper size report 0 ? */
  4991. rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
  4992. rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4993. /* CONFIG_MEMSIZE reports the size in MB */
  4994. rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
  4995. rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
  4996. rdev->mc.visible_vram_size = rdev->mc.aper_size;
  4997. si_vram_gtt_location(rdev, &rdev->mc);
  4998. radeon_update_bandwidth_info(rdev);
  4999. return 0;
  5000. }
  5001. /*
  5002. * GART
  5003. * VMID 0 is the physical GPU addresses as used by the kernel.
  5004. * VMIDs 1-15 are used for userspace clients and are handled
  5005. * by the radeon vm/hsa code.
  5006. */
  5007. /**
  5008. * cik_pcie_gart_tlb_flush - gart tlb flush callback
  5009. *
  5010. * @rdev: radeon_device pointer
  5011. *
  5012. * Flush the TLB for the VMID 0 page table (CIK).
  5013. */
  5014. void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
  5015. {
  5016. /* flush hdp cache */
  5017. WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
  5018. /* bits 0-15 are the VM contexts0-15 */
  5019. WREG32(VM_INVALIDATE_REQUEST, 0x1);
  5020. }
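/**
 * cik_pcie_init_compute_vmid - set up SH_MEM registers for compute VMIDs
 *
 * @rdev: radeon_device pointer
 *
 * Program SH_MEM_CONFIG/APE1/BASES for VMIDs 8-15, which are reserved
 * for compute (amdkfd) clients (CIK).
 */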
  5021. static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
  5022. {
  5023. int i;
  5024. uint32_t sh_mem_bases, sh_mem_config;
  5025. sh_mem_bases = 0x6000 | 0x6000 << 16;
  5026. sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
  5027. sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
  5028. mutex_lock(&rdev->srbm_mutex);
  5029. for (i = 8; i < 16; i++) {
  5030. cik_srbm_select(rdev, 0, 0, 0, i);
  5031. /* CP and shaders */
  5032. WREG32(SH_MEM_CONFIG, sh_mem_config);
  5033. WREG32(SH_MEM_APE1_BASE, 1);
  5034. WREG32(SH_MEM_APE1_LIMIT, 0);
  5035. WREG32(SH_MEM_BASES, sh_mem_bases);
  5036. }
  5037. cik_srbm_select(rdev, 0, 0, 0, 0);
  5038. mutex_unlock(&rdev->srbm_mutex);
  5039. }
  5040. /**
  5041. * cik_pcie_gart_enable - gart enable
  5042. *
  5043. * @rdev: radeon_device pointer
  5044. *
  5045. * This sets up the TLBs, programs the page tables for VMID0,
  5046. * sets up the hw for VMIDs 1-15 which are allocated on
  5047. * demand, and sets up the global locations for the LDS, GDS,
  5048. * and GPUVM for FSA64 clients (CIK).
  5049. * Returns 0 for success, errors for failure.
  5050. */
  5051. static int cik_pcie_gart_enable(struct radeon_device *rdev)
  5052. {
  5053. int r, i;
  5054. if (rdev->gart.robj == NULL) {
  5055. dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
  5056. return -EINVAL;
  5057. }
  5058. r = radeon_gart_table_vram_pin(rdev);
  5059. if (r)
  5060. return r;
  5061. /* Setup TLB control */
  5062. WREG32(MC_VM_MX_L1_TLB_CNTL,
  5063. (0xA << 7) |
  5064. ENABLE_L1_TLB |
  5065. ENABLE_L1_FRAGMENT_PROCESSING |
  5066. SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  5067. ENABLE_ADVANCED_DRIVER_MODEL |
  5068. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  5069. /* Setup L2 cache */
  5070. WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
  5071. ENABLE_L2_FRAGMENT_PROCESSING |
  5072. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  5073. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  5074. EFFECTIVE_L2_QUEUE_SIZE(7) |
  5075. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  5076. WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
  5077. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  5078. BANK_SELECT(4) |
  5079. L2_CACHE_BIGK_FRAGMENT_SIZE(4));
  5080. /* setup context0 */
  5081. WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
  5082. WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
  5083. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
  5084. WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
  5085. (u32)(rdev->dummy_page.addr >> 12));
  5086. WREG32(VM_CONTEXT0_CNTL2, 0);
  5087. WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
  5088. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
  5089. WREG32(0x15D4, 0);
  5090. WREG32(0x15D8, 0);
  5091. WREG32(0x15DC, 0);
  5092. /* restore context1-15 */
  5093. /* set vm size, must be a multiple of 4 */
  5094. WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
  5095. WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
  5096. for (i = 1; i < 16; i++) {
  5097. if (i < 8)
  5098. WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
  5099. rdev->vm_manager.saved_table_addr[i]);
  5100. else
  5101. WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
  5102. rdev->vm_manager.saved_table_addr[i]);
  5103. }
  5104. /* enable context1-15 */
  5105. WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
  5106. (u32)(rdev->dummy_page.addr >> 12));
  5107. WREG32(VM_CONTEXT1_CNTL2, 4);
  5108. WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
  5109. PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
  5110. RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5111. RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  5112. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5113. DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
  5114. PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5115. PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
  5116. VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5117. VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
  5118. READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5119. READ_PROTECTION_FAULT_ENABLE_DEFAULT |
  5120. WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
  5121. WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
  5122. if (rdev->family == CHIP_KAVERI) {
  5123. u32 tmp = RREG32(CHUB_CONTROL);
  5124. tmp &= ~BYPASS_VM;
  5125. WREG32(CHUB_CONTROL, tmp);
  5126. }
  5127. /* XXX SH_MEM regs */
  5128. /* where to put LDS, scratch, GPUVM in FSA64 space */
  5129. mutex_lock(&rdev->srbm_mutex);
  5130. for (i = 0; i < 16; i++) {
  5131. cik_srbm_select(rdev, 0, 0, 0, i);
  5132. /* CP and shaders */
  5133. WREG32(SH_MEM_CONFIG, 0);
  5134. WREG32(SH_MEM_APE1_BASE, 1);
  5135. WREG32(SH_MEM_APE1_LIMIT, 0);
  5136. WREG32(SH_MEM_BASES, 0);
  5137. /* SDMA GFX */
  5138. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
  5139. WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
  5140. WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
  5141. WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
  5142. /* XXX SDMA RLC - todo */
  5143. }
  5144. cik_srbm_select(rdev, 0, 0, 0, 0);
  5145. mutex_unlock(&rdev->srbm_mutex);
  5146. cik_pcie_init_compute_vmid(rdev);
  5147. cik_pcie_gart_tlb_flush(rdev);
  5148. DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
  5149. (unsigned)(rdev->mc.gtt_size >> 20),
  5150. (unsigned long long)rdev->gart.table_addr);
  5151. rdev->gart.ready = true;
  5152. return 0;
  5153. }
  5154. /**
  5155. * cik_pcie_gart_disable - gart disable
  5156. *
  5157. * @rdev: radeon_device pointer
  5158. *
5159. * Saves the per-VMID page table base addresses and disables all VM page tables (CIK).
  5160. */
  5161. static void cik_pcie_gart_disable(struct radeon_device *rdev)
  5162. {
  5163. unsigned i;
  5164. for (i = 1; i < 16; ++i) {
  5165. uint32_t reg;
  5166. if (i < 8)
  5167. reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
  5168. else
  5169. reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
  5170. rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
  5171. }
  5172. /* Disable all tables */
  5173. WREG32(VM_CONTEXT0_CNTL, 0);
  5174. WREG32(VM_CONTEXT1_CNTL, 0);
  5175. /* Setup TLB control */
  5176. WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
  5177. SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
  5178. /* Setup L2 cache */
  5179. WREG32(VM_L2_CNTL,
  5180. ENABLE_L2_FRAGMENT_PROCESSING |
  5181. ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
  5182. ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
  5183. EFFECTIVE_L2_QUEUE_SIZE(7) |
  5184. CONTEXT1_IDENTITY_ACCESS_MODE(1));
  5185. WREG32(VM_L2_CNTL2, 0);
  5186. WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
  5187. L2_CACHE_BIGK_FRAGMENT_SIZE(6));
  5188. radeon_gart_table_vram_unpin(rdev);
  5189. }
  5190. /**
  5191. * cik_pcie_gart_fini - vm fini callback
  5192. *
  5193. * @rdev: radeon_device pointer
  5194. *
  5195. * Tears down the driver GART/VM setup (CIK).
  5196. */
  5197. static void cik_pcie_gart_fini(struct radeon_device *rdev)
  5198. {
  5199. cik_pcie_gart_disable(rdev);
  5200. radeon_gart_table_vram_free(rdev);
  5201. radeon_gart_fini(rdev);
  5202. }
  5203. /* vm parser */
  5204. /**
  5205. * cik_ib_parse - vm ib_parse callback
  5206. *
  5207. * @rdev: radeon_device pointer
  5208. * @ib: indirect buffer pointer
  5209. *
  5210. * CIK uses hw IB checking so this is a nop (CIK).
  5211. */
  5212. int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
  5213. {
  5214. return 0;
  5215. }
  5216. /*
  5217. * vm
  5218. * VMID 0 is the physical GPU addresses as used by the kernel.
  5219. * VMIDs 1-15 are used for userspace clients and are handled
  5220. * by the radeon vm/hsa code.
  5221. */
  5222. /**
  5223. * cik_vm_init - cik vm init callback
  5224. *
  5225. * @rdev: radeon_device pointer
  5226. *
  5227. * Inits cik specific vm parameters (number of VMs, base of vram for
  5228. * VMIDs 1-15) (CIK).
  5229. * Returns 0 for success.
  5230. */
  5231. int cik_vm_init(struct radeon_device *rdev)
  5232. {
  5233. /*
  5234. * number of VMs
  5235. * VMID 0 is reserved for System
  5236. * radeon graphics/compute will use VMIDs 1-7
  5237. * amdkfd will use VMIDs 8-15
  5238. */
  5239. rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
  5240. /* base offset of vram pages */
  5241. if (rdev->flags & RADEON_IS_IGP) {
  5242. u64 tmp = RREG32(MC_VM_FB_OFFSET);
  5243. tmp <<= 22;
  5244. rdev->vm_manager.vram_base_offset = tmp;
  5245. } else
  5246. rdev->vm_manager.vram_base_offset = 0;
  5247. return 0;
  5248. }
  5249. /**
  5250. * cik_vm_fini - cik vm fini callback
  5251. *
  5252. * @rdev: radeon_device pointer
  5253. *
  5254. * Tear down any asic specific VM setup (CIK).
  5255. */
  5256. void cik_vm_fini(struct radeon_device *rdev)
  5257. {
  5258. }
  5259. /**
  5260. * cik_vm_decode_fault - print human readable fault info
  5261. *
  5262. * @rdev: radeon_device pointer
  5263. * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
  5264. * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
  5265. *
  5266. * Print human readable fault information (CIK).
  5267. */
  5268. static void cik_vm_decode_fault(struct radeon_device *rdev,
  5269. u32 status, u32 addr, u32 mc_client)
  5270. {
  5271. u32 mc_id;
  5272. u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
  5273. u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
  5274. char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
  5275. (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
  5276. if (rdev->family == CHIP_HAWAII)
  5277. mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
  5278. else
  5279. mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
  5280. printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
  5281. protections, vmid, addr,
  5282. (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
  5283. block, mc_client, mc_id);
  5284. }
  5285. /**
  5286. * cik_vm_flush - cik vm flush using the CP
  5287. *
  5288. * @rdev: radeon_device pointer
  5289. *
  5290. * Update the page table base and flush the VM TLB
  5291. * using the CP (CIK).
  5292. */
  5293. void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
  5294. unsigned vm_id, uint64_t pd_addr)
  5295. {
  5296. int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
  5297. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5298. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5299. WRITE_DATA_DST_SEL(0)));
  5300. if (vm_id < 8) {
  5301. radeon_ring_write(ring,
  5302. (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
  5303. } else {
  5304. radeon_ring_write(ring,
  5305. (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
  5306. }
  5307. radeon_ring_write(ring, 0);
  5308. radeon_ring_write(ring, pd_addr >> 12);
  5309. /* update SH_MEM_* regs */
  5310. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5311. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5312. WRITE_DATA_DST_SEL(0)));
  5313. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  5314. radeon_ring_write(ring, 0);
  5315. radeon_ring_write(ring, VMID(vm_id));
  5316. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
  5317. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5318. WRITE_DATA_DST_SEL(0)));
  5319. radeon_ring_write(ring, SH_MEM_BASES >> 2);
  5320. radeon_ring_write(ring, 0);
  5321. radeon_ring_write(ring, 0); /* SH_MEM_BASES */
  5322. radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
  5323. radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
  5324. radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
  5325. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5326. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5327. WRITE_DATA_DST_SEL(0)));
  5328. radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
  5329. radeon_ring_write(ring, 0);
  5330. radeon_ring_write(ring, VMID(0));
  5331. /* HDP flush */
  5332. cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
  5333. /* bits 0-15 are the VM contexts0-15 */
  5334. radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
  5335. radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
  5336. WRITE_DATA_DST_SEL(0)));
  5337. radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
  5338. radeon_ring_write(ring, 0);
  5339. radeon_ring_write(ring, 1 << vm_id);
  5340. /* wait for the invalidate to complete */
  5341. radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
  5342. radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
  5343. WAIT_REG_MEM_FUNCTION(0) | /* always */
  5344. WAIT_REG_MEM_ENGINE(0))); /* me */
  5345. radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
  5346. radeon_ring_write(ring, 0);
  5347. radeon_ring_write(ring, 0); /* ref */
  5348. radeon_ring_write(ring, 0); /* mask */
  5349. radeon_ring_write(ring, 0x20); /* poll interval */
  5350. /* compute doesn't have PFP */
  5351. if (usepfp) {
  5352. /* sync PFP to ME, otherwise we might get invalid PFP reads */
  5353. radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
  5354. radeon_ring_write(ring, 0x0);
  5355. }
  5356. }
  5357. /*
  5358. * RLC
  5359. * The RLC is a multi-purpose microengine that handles a
  5360. * variety of functions, the most important of which is
  5361. * the interrupt controller.
  5362. */
  5363. static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
  5364. bool enable)
  5365. {
  5366. u32 tmp = RREG32(CP_INT_CNTL_RING0);
  5367. if (enable)
  5368. tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5369. else
  5370. tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  5371. WREG32(CP_INT_CNTL_RING0, tmp);
  5372. }
  5373. static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
  5374. {
  5375. u32 tmp;
  5376. tmp = RREG32(RLC_LB_CNTL);
  5377. if (enable)
  5378. tmp |= LOAD_BALANCE_ENABLE;
  5379. else
  5380. tmp &= ~LOAD_BALANCE_ENABLE;
  5381. WREG32(RLC_LB_CNTL, tmp);
  5382. }
  5383. static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
  5384. {
  5385. u32 i, j, k;
  5386. u32 mask;
  5387. mutex_lock(&rdev->grbm_idx_mutex);
  5388. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  5389. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  5390. cik_select_se_sh(rdev, i, j);
  5391. for (k = 0; k < rdev->usec_timeout; k++) {
  5392. if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
  5393. break;
  5394. udelay(1);
  5395. }
  5396. }
  5397. }
  5398. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5399. mutex_unlock(&rdev->grbm_idx_mutex);
  5400. mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
  5401. for (k = 0; k < rdev->usec_timeout; k++) {
  5402. if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
  5403. break;
  5404. udelay(1);
  5405. }
  5406. }
  5407. static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
  5408. {
  5409. u32 tmp;
  5410. tmp = RREG32(RLC_CNTL);
  5411. if (tmp != rlc)
  5412. WREG32(RLC_CNTL, rlc);
  5413. }
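/* Disable the RLC and wait for the GPM and serdes masters to go idle.
 * Returns the original RLC_CNTL value so the caller can restore it later
 * via cik_update_rlc().
 */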
  5414. static u32 cik_halt_rlc(struct radeon_device *rdev)
  5415. {
  5416. u32 data, orig;
  5417. orig = data = RREG32(RLC_CNTL);
  5418. if (data & RLC_ENABLE) {
  5419. u32 i;
  5420. data &= ~RLC_ENABLE;
  5421. WREG32(RLC_CNTL, data);
  5422. for (i = 0; i < rdev->usec_timeout; i++) {
  5423. if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
  5424. break;
  5425. udelay(1);
  5426. }
  5427. cik_wait_for_rlc_serdes(rdev);
  5428. }
  5429. return orig;
  5430. }
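/* Request RLC safe mode via the RLC_GPR_REG2 handshake: wait for the GFX
 * power/clock status bits to assert, then for the REQ bit to be acked.
 */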
  5431. void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
  5432. {
  5433. u32 tmp, i, mask;
  5434. tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
  5435. WREG32(RLC_GPR_REG2, tmp);
  5436. mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
  5437. for (i = 0; i < rdev->usec_timeout; i++) {
  5438. if ((RREG32(RLC_GPM_STAT) & mask) == mask)
  5439. break;
  5440. udelay(1);
  5441. }
  5442. for (i = 0; i < rdev->usec_timeout; i++) {
  5443. if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
  5444. break;
  5445. udelay(1);
  5446. }
  5447. }
  5448. void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
  5449. {
  5450. u32 tmp;
  5451. tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
  5452. WREG32(RLC_GPR_REG2, tmp);
  5453. }
  5454. /**
  5455. * cik_rlc_stop - stop the RLC ME
  5456. *
  5457. * @rdev: radeon_device pointer
  5458. *
  5459. * Halt the RLC ME (MicroEngine) (CIK).
  5460. */
  5461. static void cik_rlc_stop(struct radeon_device *rdev)
  5462. {
  5463. WREG32(RLC_CNTL, 0);
  5464. cik_enable_gui_idle_interrupt(rdev, false);
  5465. cik_wait_for_rlc_serdes(rdev);
  5466. }
  5467. /**
  5468. * cik_rlc_start - start the RLC ME
  5469. *
  5470. * @rdev: radeon_device pointer
  5471. *
  5472. * Unhalt the RLC ME (MicroEngine) (CIK).
  5473. */
  5474. static void cik_rlc_start(struct radeon_device *rdev)
  5475. {
  5476. WREG32(RLC_CNTL, RLC_ENABLE);
  5477. cik_enable_gui_idle_interrupt(rdev, true);
  5478. udelay(50);
  5479. }
  5480. /**
  5481. * cik_rlc_resume - setup the RLC hw
  5482. *
  5483. * @rdev: radeon_device pointer
  5484. *
  5485. * Initialize the RLC registers, load the ucode,
  5486. * and start the RLC (CIK).
  5487. * Returns 0 for success, -EINVAL if the ucode is not available.
  5488. */
  5489. static int cik_rlc_resume(struct radeon_device *rdev)
  5490. {
  5491. u32 i, size, tmp;
  5492. if (!rdev->rlc_fw)
  5493. return -EINVAL;
  5494. cik_rlc_stop(rdev);
  5495. /* disable CG */
  5496. tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
  5497. WREG32(RLC_CGCG_CGLS_CTRL, tmp);
  5498. si_rlc_reset(rdev);
  5499. cik_init_pg(rdev);
  5500. cik_init_cg(rdev);
  5501. WREG32(RLC_LB_CNTR_INIT, 0);
  5502. WREG32(RLC_LB_CNTR_MAX, 0x00008000);
  5503. mutex_lock(&rdev->grbm_idx_mutex);
  5504. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5505. WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
  5506. WREG32(RLC_LB_PARAMS, 0x00600408);
  5507. WREG32(RLC_LB_CNTL, 0x80000004);
  5508. mutex_unlock(&rdev->grbm_idx_mutex);
  5509. WREG32(RLC_MC_CNTL, 0);
  5510. WREG32(RLC_UCODE_CNTL, 0);
  5511. if (rdev->new_fw) {
  5512. const struct rlc_firmware_header_v1_0 *hdr =
  5513. (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
  5514. const __le32 *fw_data = (const __le32 *)
  5515. (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5516. radeon_ucode_print_rlc_hdr(&hdr->header);
  5517. size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
  5518. WREG32(RLC_GPM_UCODE_ADDR, 0);
  5519. for (i = 0; i < size; i++)
  5520. WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
  5521. WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
  5522. } else {
  5523. const __be32 *fw_data;
  5524. switch (rdev->family) {
  5525. case CHIP_BONAIRE:
  5526. case CHIP_HAWAII:
  5527. default:
  5528. size = BONAIRE_RLC_UCODE_SIZE;
  5529. break;
  5530. case CHIP_KAVERI:
  5531. size = KV_RLC_UCODE_SIZE;
  5532. break;
  5533. case CHIP_KABINI:
  5534. size = KB_RLC_UCODE_SIZE;
  5535. break;
  5536. case CHIP_MULLINS:
  5537. size = ML_RLC_UCODE_SIZE;
  5538. break;
  5539. }
  5540. fw_data = (const __be32 *)rdev->rlc_fw->data;
  5541. WREG32(RLC_GPM_UCODE_ADDR, 0);
  5542. for (i = 0; i < size; i++)
  5543. WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
  5544. WREG32(RLC_GPM_UCODE_ADDR, 0);
  5545. }
  5546. /* XXX - find out what chips support lbpw */
  5547. cik_enable_lbpw(rdev, false);
  5548. if (rdev->family == CHIP_BONAIRE)
  5549. WREG32(RLC_DRIVER_DMA_STATUS, 0);
  5550. cik_rlc_start(rdev);
  5551. return 0;
  5552. }
  5553. static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
  5554. {
  5555. u32 data, orig, tmp, tmp2;
  5556. orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
  5557. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
  5558. cik_enable_gui_idle_interrupt(rdev, true);
  5559. tmp = cik_halt_rlc(rdev);
  5560. mutex_lock(&rdev->grbm_idx_mutex);
  5561. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5562. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5563. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5564. tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
  5565. WREG32(RLC_SERDES_WR_CTRL, tmp2);
  5566. mutex_unlock(&rdev->grbm_idx_mutex);
  5567. cik_update_rlc(rdev, tmp);
  5568. data |= CGCG_EN | CGLS_EN;
  5569. } else {
  5570. cik_enable_gui_idle_interrupt(rdev, false);
  5571. RREG32(CB_CGTT_SCLK_CTRL);
  5572. RREG32(CB_CGTT_SCLK_CTRL);
  5573. RREG32(CB_CGTT_SCLK_CTRL);
  5574. RREG32(CB_CGTT_SCLK_CTRL);
  5575. data &= ~(CGCG_EN | CGLS_EN);
  5576. }
  5577. if (orig != data)
  5578. WREG32(RLC_CGCG_CGLS_CTRL, data);
  5579. }
  5580. static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
  5581. {
  5582. u32 data, orig, tmp = 0;
  5583. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
  5584. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
  5585. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
  5586. orig = data = RREG32(CP_MEM_SLP_CNTL);
  5587. data |= CP_MEM_LS_EN;
  5588. if (orig != data)
  5589. WREG32(CP_MEM_SLP_CNTL, data);
  5590. }
  5591. }
  5592. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  5593. data |= 0x00000001;
  5594. data &= 0xfffffffd;
  5595. if (orig != data)
  5596. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  5597. tmp = cik_halt_rlc(rdev);
  5598. mutex_lock(&rdev->grbm_idx_mutex);
  5599. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5600. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5601. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5602. data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
  5603. WREG32(RLC_SERDES_WR_CTRL, data);
  5604. mutex_unlock(&rdev->grbm_idx_mutex);
  5605. cik_update_rlc(rdev, tmp);
  5606. if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
  5607. orig = data = RREG32(CGTS_SM_CTRL_REG);
  5608. data &= ~SM_MODE_MASK;
  5609. data |= SM_MODE(0x2);
  5610. data |= SM_MODE_ENABLE;
  5611. data &= ~CGTS_OVERRIDE;
  5612. if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
  5613. (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
  5614. data &= ~CGTS_LS_OVERRIDE;
  5615. data &= ~ON_MONITOR_ADD_MASK;
  5616. data |= ON_MONITOR_ADD_EN;
  5617. data |= ON_MONITOR_ADD(0x96);
  5618. if (orig != data)
  5619. WREG32(CGTS_SM_CTRL_REG, data);
  5620. }
  5621. } else {
  5622. orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
  5623. data |= 0x00000003;
  5624. if (orig != data)
  5625. WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
  5626. data = RREG32(RLC_MEM_SLP_CNTL);
  5627. if (data & RLC_MEM_LS_EN) {
  5628. data &= ~RLC_MEM_LS_EN;
  5629. WREG32(RLC_MEM_SLP_CNTL, data);
  5630. }
  5631. data = RREG32(CP_MEM_SLP_CNTL);
  5632. if (data & CP_MEM_LS_EN) {
  5633. data &= ~CP_MEM_LS_EN;
  5634. WREG32(CP_MEM_SLP_CNTL, data);
  5635. }
  5636. orig = data = RREG32(CGTS_SM_CTRL_REG);
  5637. data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
  5638. if (orig != data)
  5639. WREG32(CGTS_SM_CTRL_REG, data);
  5640. tmp = cik_halt_rlc(rdev);
  5641. mutex_lock(&rdev->grbm_idx_mutex);
  5642. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  5643. WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
  5644. WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
  5645. data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
  5646. WREG32(RLC_SERDES_WR_CTRL, data);
  5647. mutex_unlock(&rdev->grbm_idx_mutex);
  5648. cik_update_rlc(rdev, tmp);
  5649. }
  5650. }
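/* MC hub/CITF/XPB/ATC/VM_L2 registers whose MC_LS_ENABLE/MC_CG_ENABLE bits
 * are toggled as a group by the two helpers below.
 */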
  5651. static const u32 mc_cg_registers[] =
  5652. {
  5653. MC_HUB_MISC_HUB_CG,
  5654. MC_HUB_MISC_SIP_CG,
  5655. MC_HUB_MISC_VM_CG,
  5656. MC_XPB_CLK_GAT,
  5657. ATC_MISC_CG,
  5658. MC_CITF_MISC_WR_CG,
  5659. MC_CITF_MISC_RD_CG,
  5660. MC_CITF_MISC_VM_CG,
  5661. VM_L2_CG,
  5662. };
  5663. static void cik_enable_mc_ls(struct radeon_device *rdev,
  5664. bool enable)
  5665. {
  5666. int i;
  5667. u32 orig, data;
  5668. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  5669. orig = data = RREG32(mc_cg_registers[i]);
  5670. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
  5671. data |= MC_LS_ENABLE;
  5672. else
  5673. data &= ~MC_LS_ENABLE;
  5674. if (data != orig)
  5675. WREG32(mc_cg_registers[i], data);
  5676. }
  5677. }
  5678. static void cik_enable_mc_mgcg(struct radeon_device *rdev,
  5679. bool enable)
  5680. {
  5681. int i;
  5682. u32 orig, data;
  5683. for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
  5684. orig = data = RREG32(mc_cg_registers[i]);
  5685. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
  5686. data |= MC_CG_ENABLE;
  5687. else
  5688. data &= ~MC_CG_ENABLE;
  5689. if (data != orig)
  5690. WREG32(mc_cg_registers[i], data);
  5691. }
  5692. }
  5693. static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
  5694. bool enable)
  5695. {
  5696. u32 orig, data;
  5697. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
  5698. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
  5699. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
  5700. } else {
  5701. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
  5702. data |= 0xff000000;
  5703. if (data != orig)
  5704. WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
  5705. orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
  5706. data |= 0xff000000;
  5707. if (data != orig)
  5708. WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
  5709. }
  5710. }
  5711. static void cik_enable_sdma_mgls(struct radeon_device *rdev,
  5712. bool enable)
  5713. {
  5714. u32 orig, data;
  5715. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
  5716. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  5717. data |= 0x100;
  5718. if (orig != data)
  5719. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  5720. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  5721. data |= 0x100;
  5722. if (orig != data)
  5723. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  5724. } else {
  5725. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
  5726. data &= ~0x100;
  5727. if (orig != data)
  5728. WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
  5729. orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
  5730. data &= ~0x100;
  5731. if (orig != data)
  5732. WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
  5733. }
  5734. }
  5735. static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
  5736. bool enable)
  5737. {
  5738. u32 orig, data;
  5739. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
  5740. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
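/* note: the value read above is discarded; UVD_CGC_MEM_CTRL is simply forced to 0xfff */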
  5741. data = 0xfff;
  5742. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  5743. orig = data = RREG32(UVD_CGC_CTRL);
  5744. data |= DCM;
  5745. if (orig != data)
  5746. WREG32(UVD_CGC_CTRL, data);
  5747. } else {
  5748. data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
  5749. data &= ~0xfff;
  5750. WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
  5751. orig = data = RREG32(UVD_CGC_CTRL);
  5752. data &= ~DCM;
  5753. if (orig != data)
  5754. WREG32(UVD_CGC_CTRL, data);
  5755. }
  5756. }
  5757. static void cik_enable_bif_mgls(struct radeon_device *rdev,
  5758. bool enable)
  5759. {
  5760. u32 orig, data;
  5761. orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
  5762. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
  5763. data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
  5764. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
  5765. else
  5766. data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
  5767. REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
  5768. if (orig != data)
  5769. WREG32_PCIE_PORT(PCIE_CNTL2, data);
  5770. }
  5771. static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
  5772. bool enable)
  5773. {
  5774. u32 orig, data;
  5775. orig = data = RREG32(HDP_HOST_PATH_CNTL);
  5776. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
  5777. data &= ~CLOCK_GATING_DIS;
  5778. else
  5779. data |= CLOCK_GATING_DIS;
  5780. if (orig != data)
  5781. WREG32(HDP_HOST_PATH_CNTL, data);
  5782. }
  5783. static void cik_enable_hdp_ls(struct radeon_device *rdev,
  5784. bool enable)
  5785. {
  5786. u32 orig, data;
  5787. orig = data = RREG32(HDP_MEM_POWER_LS);
  5788. if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
  5789. data |= HDP_LS_ENABLE;
  5790. else
  5791. data &= ~HDP_LS_ENABLE;
  5792. if (orig != data)
  5793. WREG32(HDP_MEM_POWER_LS, data);
  5794. }
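/**
 * cik_update_cg - enable/disable clock gating for a set of blocks
 *
 * @rdev: radeon_device pointer
 * @block: RADEON_CG_BLOCK_* mask of blocks to update
 * @enable: enable or disable clock gating
 *
 * For GFX the MGCG/CGCG ordering matters, so GUI idle interrupts are
 * masked while the gating state is reprogrammed (CIK).
 */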
  5795. void cik_update_cg(struct radeon_device *rdev,
  5796. u32 block, bool enable)
  5797. {
  5798. if (block & RADEON_CG_BLOCK_GFX) {
  5799. cik_enable_gui_idle_interrupt(rdev, false);
  5800. /* order matters! */
  5801. if (enable) {
  5802. cik_enable_mgcg(rdev, true);
  5803. cik_enable_cgcg(rdev, true);
  5804. } else {
  5805. cik_enable_cgcg(rdev, false);
  5806. cik_enable_mgcg(rdev, false);
  5807. }
  5808. cik_enable_gui_idle_interrupt(rdev, true);
  5809. }
  5810. if (block & RADEON_CG_BLOCK_MC) {
  5811. if (!(rdev->flags & RADEON_IS_IGP)) {
  5812. cik_enable_mc_mgcg(rdev, enable);
  5813. cik_enable_mc_ls(rdev, enable);
  5814. }
  5815. }
  5816. if (block & RADEON_CG_BLOCK_SDMA) {
  5817. cik_enable_sdma_mgcg(rdev, enable);
  5818. cik_enable_sdma_mgls(rdev, enable);
  5819. }
  5820. if (block & RADEON_CG_BLOCK_BIF) {
  5821. cik_enable_bif_mgls(rdev, enable);
  5822. }
  5823. if (block & RADEON_CG_BLOCK_UVD) {
  5824. if (rdev->has_uvd)
  5825. cik_enable_uvd_mgcg(rdev, enable);
  5826. }
  5827. if (block & RADEON_CG_BLOCK_HDP) {
  5828. cik_enable_hdp_mgcg(rdev, enable);
  5829. cik_enable_hdp_ls(rdev, enable);
  5830. }
  5831. if (block & RADEON_CG_BLOCK_VCE) {
  5832. vce_v2_0_enable_mgcg(rdev, enable);
  5833. }
  5834. }
  5835. static void cik_init_cg(struct radeon_device *rdev)
  5836. {
  5837. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
  5838. if (rdev->has_uvd)
  5839. si_init_uvd_internal_cg(rdev);
  5840. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  5841. RADEON_CG_BLOCK_SDMA |
  5842. RADEON_CG_BLOCK_BIF |
  5843. RADEON_CG_BLOCK_UVD |
  5844. RADEON_CG_BLOCK_HDP), true);
  5845. }
  5846. static void cik_fini_cg(struct radeon_device *rdev)
  5847. {
  5848. cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
  5849. RADEON_CG_BLOCK_SDMA |
  5850. RADEON_CG_BLOCK_BIF |
  5851. RADEON_CG_BLOCK_UVD |
  5852. RADEON_CG_BLOCK_HDP), false);
  5853. cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
  5854. }
  5855. static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
  5856. bool enable)
  5857. {
  5858. u32 data, orig;
  5859. orig = data = RREG32(RLC_PG_CNTL);
  5860. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5861. data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5862. else
  5863. data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
  5864. if (orig != data)
  5865. WREG32(RLC_PG_CNTL, data);
  5866. }
  5867. static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
  5868. bool enable)
  5869. {
  5870. u32 data, orig;
  5871. orig = data = RREG32(RLC_PG_CNTL);
  5872. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
  5873. data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5874. else
  5875. data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
  5876. if (orig != data)
  5877. WREG32(RLC_PG_CNTL, data);
  5878. }
  5879. static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
  5880. {
  5881. u32 data, orig;
  5882. orig = data = RREG32(RLC_PG_CNTL);
  5883. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
  5884. data &= ~DISABLE_CP_PG;
  5885. else
  5886. data |= DISABLE_CP_PG;
  5887. if (orig != data)
  5888. WREG32(RLC_PG_CNTL, data);
  5889. }
  5890. static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
  5891. {
  5892. u32 data, orig;
  5893. orig = data = RREG32(RLC_PG_CNTL);
  5894. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
  5895. data &= ~DISABLE_GDS_PG;
  5896. else
  5897. data |= DISABLE_GDS_PG;
  5898. if (orig != data)
  5899. WREG32(RLC_PG_CNTL, data);
  5900. }
  5901. #define CP_ME_TABLE_SIZE 96
  5902. #define CP_ME_TABLE_OFFSET 2048
  5903. #define CP_MEC_TABLE_OFFSET 4096
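/**
 * cik_init_cp_pg_table - populate the RLC CP jump table
 *
 * @rdev: radeon_device pointer
 *
 * Copy the jump tables from the CE, PFP, ME and MEC (plus MEC2 on Kaveri)
 * microcode images into the RLC cp_table buffer so the RLC can restore
 * CP state when the CP is power gated (CIK).
 */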
  5904. void cik_init_cp_pg_table(struct radeon_device *rdev)
  5905. {
  5906. volatile u32 *dst_ptr;
  5907. int me, i, max_me = 4;
  5908. u32 bo_offset = 0;
  5909. u32 table_offset, table_size;
  5910. if (rdev->family == CHIP_KAVERI)
  5911. max_me = 5;
  5912. if (rdev->rlc.cp_table_ptr == NULL)
  5913. return;
  5914. /* write the cp table buffer */
  5915. dst_ptr = rdev->rlc.cp_table_ptr;
  5916. for (me = 0; me < max_me; me++) {
  5917. if (rdev->new_fw) {
  5918. const __le32 *fw_data;
  5919. const struct gfx_firmware_header_v1_0 *hdr;
  5920. if (me == 0) {
  5921. hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
  5922. fw_data = (const __le32 *)
  5923. (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5924. table_offset = le32_to_cpu(hdr->jt_offset);
  5925. table_size = le32_to_cpu(hdr->jt_size);
  5926. } else if (me == 1) {
  5927. hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
  5928. fw_data = (const __le32 *)
  5929. (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5930. table_offset = le32_to_cpu(hdr->jt_offset);
  5931. table_size = le32_to_cpu(hdr->jt_size);
  5932. } else if (me == 2) {
  5933. hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
  5934. fw_data = (const __le32 *)
  5935. (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5936. table_offset = le32_to_cpu(hdr->jt_offset);
  5937. table_size = le32_to_cpu(hdr->jt_size);
  5938. } else if (me == 3) {
  5939. hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
  5940. fw_data = (const __le32 *)
  5941. (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5942. table_offset = le32_to_cpu(hdr->jt_offset);
  5943. table_size = le32_to_cpu(hdr->jt_size);
  5944. } else {
  5945. hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
  5946. fw_data = (const __le32 *)
  5947. (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
  5948. table_offset = le32_to_cpu(hdr->jt_offset);
  5949. table_size = le32_to_cpu(hdr->jt_size);
  5950. }
  5951. for (i = 0; i < table_size; i ++) {
  5952. dst_ptr[bo_offset + i] =
  5953. cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
  5954. }
  5955. bo_offset += table_size;
  5956. } else {
  5957. const __be32 *fw_data;
  5958. table_size = CP_ME_TABLE_SIZE;
  5959. if (me == 0) {
  5960. fw_data = (const __be32 *)rdev->ce_fw->data;
  5961. table_offset = CP_ME_TABLE_OFFSET;
  5962. } else if (me == 1) {
  5963. fw_data = (const __be32 *)rdev->pfp_fw->data;
  5964. table_offset = CP_ME_TABLE_OFFSET;
  5965. } else if (me == 2) {
  5966. fw_data = (const __be32 *)rdev->me_fw->data;
  5967. table_offset = CP_ME_TABLE_OFFSET;
  5968. } else {
  5969. fw_data = (const __be32 *)rdev->mec_fw->data;
  5970. table_offset = CP_MEC_TABLE_OFFSET;
  5971. }
  5972. for (i = 0; i < table_size; i ++) {
  5973. dst_ptr[bo_offset + i] =
  5974. cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
  5975. }
  5976. bo_offset += table_size;
  5977. }
  5978. }
  5979. }
  5980. static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
  5981. bool enable)
  5982. {
  5983. u32 data, orig;
  5984. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
  5985. orig = data = RREG32(RLC_PG_CNTL);
  5986. data |= GFX_PG_ENABLE;
  5987. if (orig != data)
  5988. WREG32(RLC_PG_CNTL, data);
  5989. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5990. data |= AUTO_PG_EN;
  5991. if (orig != data)
  5992. WREG32(RLC_AUTO_PG_CTRL, data);
  5993. } else {
  5994. orig = data = RREG32(RLC_PG_CNTL);
  5995. data &= ~GFX_PG_ENABLE;
  5996. if (orig != data)
  5997. WREG32(RLC_PG_CNTL, data);
  5998. orig = data = RREG32(RLC_AUTO_PG_CTRL);
  5999. data &= ~AUTO_PG_EN;
  6000. if (orig != data)
  6001. WREG32(RLC_AUTO_PG_CTRL, data);
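/* the read below presumably acts as a posting read after disabling
 * auto power gating; the value is not used (assumption) */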
  6002. data = RREG32(DB_RENDER_CONTROL);
  6003. }
  6004. }
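/* Return a bitmask of active CUs for the given SE/SH: the INACTIVE_CUS
 * fields of CC_GC_SHADER_ARRAY_CONFIG (hw fused off) and
 * GC_USER_SHADER_ARRAY_CONFIG (user disabled) are combined, inverted,
 * and limited to max_cu_per_sh.
 */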
  6005. static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
  6006. {
  6007. u32 mask = 0, tmp, tmp1;
  6008. int i;
  6009. mutex_lock(&rdev->grbm_idx_mutex);
  6010. cik_select_se_sh(rdev, se, sh);
  6011. tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
  6012. tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
  6013. cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
  6014. mutex_unlock(&rdev->grbm_idx_mutex);
  6015. tmp &= 0xffff0000;
  6016. tmp |= tmp1;
  6017. tmp >>= 16;
  6018. for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
  6019. mask <<= 1;
  6020. mask |= 1;
  6021. }
  6022. return (~tmp) & mask;
  6023. }
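/* Pick up to two always-on CUs per SH, program the resulting bitmap into
 * RLC_PG_AO_CU_MASK, and set RLC_MAX_PG_CU to the total number of active CUs.
 */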
  6024. static void cik_init_ao_cu_mask(struct radeon_device *rdev)
  6025. {
  6026. u32 i, j, k, active_cu_number = 0;
  6027. u32 mask, counter, cu_bitmap;
  6028. u32 tmp = 0;
  6029. for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
  6030. for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
  6031. mask = 1;
  6032. cu_bitmap = 0;
  6033. counter = 0;
  6034. for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
  6035. if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
  6036. if (counter < 2)
  6037. cu_bitmap |= mask;
  6038. counter ++;
  6039. }
  6040. mask <<= 1;
  6041. }
  6042. active_cu_number += counter;
  6043. tmp |= (cu_bitmap << (i * 16 + j * 8));
  6044. }
  6045. }
  6046. WREG32(RLC_PG_AO_CU_MASK, tmp);
  6047. tmp = RREG32(RLC_MAX_PG_CU);
  6048. tmp &= ~MAX_PU_CU_MASK;
  6049. tmp |= MAX_PU_CU(active_cu_number);
  6050. WREG32(RLC_MAX_PG_CU, tmp);
  6051. }
  6052. static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
  6053. bool enable)
  6054. {
  6055. u32 data, orig;
  6056. orig = data = RREG32(RLC_PG_CNTL);
  6057. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
  6058. data |= STATIC_PER_CU_PG_ENABLE;
  6059. else
  6060. data &= ~STATIC_PER_CU_PG_ENABLE;
  6061. if (orig != data)
  6062. WREG32(RLC_PG_CNTL, data);
  6063. }
  6064. static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
  6065. bool enable)
  6066. {
  6067. u32 data, orig;
  6068. orig = data = RREG32(RLC_PG_CNTL);
  6069. if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
  6070. data |= DYN_PER_CU_PG_ENABLE;
  6071. else
  6072. data &= ~DYN_PER_CU_PG_ENABLE;
  6073. if (orig != data)
  6074. WREG32(RLC_PG_CNTL, data);
  6075. }
  6076. #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
  6077. #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
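/* Write the clear-state descriptor (address + size) and the register
 * save/restore list into the RLC GPM scratch area, point the RLC at the
 * save/restore and CP table buffers, and tune the power gating delays.
 */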
  6078. static void cik_init_gfx_cgpg(struct radeon_device *rdev)
  6079. {
  6080. u32 data, orig;
  6081. u32 i;
  6082. if (rdev->rlc.cs_data) {
  6083. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  6084. WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
  6085. WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
  6086. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
  6087. } else {
  6088. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
  6089. for (i = 0; i < 3; i++)
  6090. WREG32(RLC_GPM_SCRATCH_DATA, 0);
  6091. }
  6092. if (rdev->rlc.reg_list) {
  6093. WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
  6094. for (i = 0; i < rdev->rlc.reg_list_size; i++)
  6095. WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
  6096. }
  6097. orig = data = RREG32(RLC_PG_CNTL);
  6098. data |= GFX_PG_SRC;
  6099. if (orig != data)
  6100. WREG32(RLC_PG_CNTL, data);
  6101. WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
  6102. WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
  6103. data = RREG32(CP_RB_WPTR_POLL_CNTL);
  6104. data &= ~IDLE_POLL_COUNT_MASK;
  6105. data |= IDLE_POLL_COUNT(0x60);
  6106. WREG32(CP_RB_WPTR_POLL_CNTL, data);
  6107. data = 0x10101010;
  6108. WREG32(RLC_PG_DELAY, data);
  6109. data = RREG32(RLC_PG_DELAY_2);
  6110. data &= ~0xff;
  6111. data |= 0x3;
  6112. WREG32(RLC_PG_DELAY_2, data);
  6113. data = RREG32(RLC_AUTO_PG_CTRL);
  6114. data &= ~GRBM_REG_SGIT_MASK;
  6115. data |= GRBM_REG_SGIT(0x700);
  6116. WREG32(RLC_AUTO_PG_CTRL, data);
  6117. }
  6118. static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
  6119. {
  6120. cik_enable_gfx_cgpg(rdev, enable);
  6121. cik_enable_gfx_static_mgpg(rdev, enable);
  6122. cik_enable_gfx_dynamic_mgpg(rdev, enable);
  6123. }
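/**
 * cik_get_csb_size - compute the size of the clear state buffer
 *
 * @rdev: radeon_device pointer
 *
 * Returns the number of dwords needed for the clear state buffer built
 * by cik_get_csb_buffer(), or 0 if there is no CS data.
 */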
  6124. u32 cik_get_csb_size(struct radeon_device *rdev)
  6125. {
  6126. u32 count = 0;
  6127. const struct cs_section_def *sect = NULL;
  6128. const struct cs_extent_def *ext = NULL;
  6129. if (rdev->rlc.cs_data == NULL)
  6130. return 0;
  6131. /* begin clear state */
  6132. count += 2;
  6133. /* context control state */
  6134. count += 3;
  6135. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  6136. for (ext = sect->section; ext->extent != NULL; ++ext) {
  6137. if (sect->id == SECT_CONTEXT)
  6138. count += 2 + ext->reg_count;
  6139. else
  6140. return 0;
  6141. }
  6142. }
  6143. /* pa_sc_raster_config/pa_sc_raster_config1 */
  6144. count += 4;
  6145. /* end clear state */
  6146. count += 2;
  6147. /* clear state */
  6148. count += 2;
  6149. return count;
  6150. }
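/**
 * cik_get_csb_buffer - fill the clear state buffer
 *
 * @rdev: radeon_device pointer
 * @buffer: destination buffer (little-endian dwords)
 *
 * Emit the PACKET3 stream that loads the default context registers,
 * including the per-family PA_SC_RASTER_CONFIG values, followed by a
 * CLEAR_STATE packet (CIK).
 */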
  6151. void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
  6152. {
  6153. u32 count = 0, i;
  6154. const struct cs_section_def *sect = NULL;
  6155. const struct cs_extent_def *ext = NULL;
  6156. if (rdev->rlc.cs_data == NULL)
  6157. return;
  6158. if (buffer == NULL)
  6159. return;
  6160. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  6161. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
  6162. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
  6163. buffer[count++] = cpu_to_le32(0x80000000);
  6164. buffer[count++] = cpu_to_le32(0x80000000);
  6165. for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
  6166. for (ext = sect->section; ext->extent != NULL; ++ext) {
  6167. if (sect->id == SECT_CONTEXT) {
  6168. buffer[count++] =
  6169. cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
  6170. buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
  6171. for (i = 0; i < ext->reg_count; i++)
  6172. buffer[count++] = cpu_to_le32(ext->extent[i]);
  6173. } else {
  6174. return;
  6175. }
  6176. }
  6177. }
  6178. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
  6179. buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
  6180. switch (rdev->family) {
  6181. case CHIP_BONAIRE:
  6182. buffer[count++] = cpu_to_le32(0x16000012);
  6183. buffer[count++] = cpu_to_le32(0x00000000);
  6184. break;
  6185. case CHIP_KAVERI:
  6186. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  6187. buffer[count++] = cpu_to_le32(0x00000000);
  6188. break;
  6189. case CHIP_KABINI:
  6190. case CHIP_MULLINS:
  6191. buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
  6192. buffer[count++] = cpu_to_le32(0x00000000);
  6193. break;
  6194. case CHIP_HAWAII:
  6195. buffer[count++] = cpu_to_le32(0x3a00161a);
  6196. buffer[count++] = cpu_to_le32(0x0000002e);
  6197. break;
  6198. default:
  6199. buffer[count++] = cpu_to_le32(0x00000000);
  6200. buffer[count++] = cpu_to_le32(0x00000000);
  6201. break;
  6202. }
  6203. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
  6204. buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
  6205. buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
  6206. buffer[count++] = cpu_to_le32(0);
  6207. }
  6208. static void cik_init_pg(struct radeon_device *rdev)
  6209. {
  6210. if (rdev->pg_flags) {
  6211. cik_enable_sck_slowdown_on_pu(rdev, true);
  6212. cik_enable_sck_slowdown_on_pd(rdev, true);
  6213. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
  6214. cik_init_gfx_cgpg(rdev);
  6215. cik_enable_cp_pg(rdev, true);
  6216. cik_enable_gds_pg(rdev, true);
  6217. }
  6218. cik_init_ao_cu_mask(rdev);
  6219. cik_update_gfx_pg(rdev, true);
  6220. }
  6221. }
  6222. static void cik_fini_pg(struct radeon_device *rdev)
  6223. {
  6224. if (rdev->pg_flags) {
  6225. cik_update_gfx_pg(rdev, false);
  6226. if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
  6227. cik_enable_cp_pg(rdev, false);
  6228. cik_enable_gds_pg(rdev, false);
  6229. }
  6230. }
  6231. }
  6232. /*
  6233. * Interrupts
  6234. * Starting with r6xx, interrupts are handled via a ring buffer.
  6235. * Ring buffers are areas of GPU accessible memory that the GPU
  6236. * writes interrupt vectors into and the host reads vectors out of.
  6237. * There is a rptr (read pointer) that determines where the
  6238. * host is currently reading, and a wptr (write pointer)
  6239. * which determines where the GPU has written. When the
  6240. * pointers are equal, the ring is idle. When the GPU
  6241. * writes vectors to the ring buffer, it increments the
  6242. * wptr. When there is an interrupt, the host then starts
  6243. * fetching commands and processing them until the pointers are
  6244. * equal again at which point it updates the rptr.
  6245. */
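/*
 * Rough sketch of the host-side drain loop (illustrative only; the real
 * handler is cik_irq_process() later in this file):
 *
 *   wptr = cik_get_ih_wptr(rdev);             // latched from writeback/IH_RB_WPTR
 *   while (rptr != wptr) {
 *           src_id   = le32_to_cpu(ih.ring[rptr / 4]);
 *           src_data = le32_to_cpu(ih.ring[rptr / 4 + 1]);
 *           ... dispatch on src_id/src_data ...
 *           rptr = (rptr + 16) & ih.ptr_mask; // each vector is 128 bits
 *   }
 *   WREG32(IH_RB_RPTR, rptr);
 */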
  6246. /**
  6247. * cik_enable_interrupts - Enable the interrupt ring buffer
  6248. *
  6249. * @rdev: radeon_device pointer
  6250. *
  6251. * Enable the interrupt ring buffer (CIK).
  6252. */
  6253. static void cik_enable_interrupts(struct radeon_device *rdev)
  6254. {
  6255. u32 ih_cntl = RREG32(IH_CNTL);
  6256. u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
  6257. ih_cntl |= ENABLE_INTR;
  6258. ih_rb_cntl |= IH_RB_ENABLE;
  6259. WREG32(IH_CNTL, ih_cntl);
  6260. WREG32(IH_RB_CNTL, ih_rb_cntl);
  6261. rdev->ih.enabled = true;
  6262. }
  6263. /**
  6264. * cik_disable_interrupts - Disable the interrupt ring buffer
  6265. *
  6266. * @rdev: radeon_device pointer
  6267. *
  6268. * Disable the interrupt ring buffer (CIK).
  6269. */
  6270. static void cik_disable_interrupts(struct radeon_device *rdev)
  6271. {
  6272. u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
  6273. u32 ih_cntl = RREG32(IH_CNTL);
  6274. ih_rb_cntl &= ~IH_RB_ENABLE;
  6275. ih_cntl &= ~ENABLE_INTR;
  6276. WREG32(IH_RB_CNTL, ih_rb_cntl);
  6277. WREG32(IH_CNTL, ih_cntl);
  6278. /* set rptr, wptr to 0 */
  6279. WREG32(IH_RB_RPTR, 0);
  6280. WREG32(IH_RB_WPTR, 0);
  6281. rdev->ih.enabled = false;
  6282. rdev->ih.rptr = 0;
  6283. }
  6284. /**
  6285. * cik_disable_interrupt_state - Disable all interrupt sources
  6286. *
  6287. * @rdev: radeon_device pointer
  6288. *
  6289. * Clear all interrupt enable bits used by the driver (CIK).
  6290. */
  6291. static void cik_disable_interrupt_state(struct radeon_device *rdev)
  6292. {
  6293. u32 tmp;
  6294. /* gfx ring */
  6295. tmp = RREG32(CP_INT_CNTL_RING0) &
  6296. (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  6297. WREG32(CP_INT_CNTL_RING0, tmp);
  6298. /* sdma */
  6299. tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6300. WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
  6301. tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6302. WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
  6303. /* compute queues */
  6304. WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
  6305. WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
  6306. WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
  6307. WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
  6308. WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
  6309. WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
  6310. WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
  6311. WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
  6312. /* grbm */
  6313. WREG32(GRBM_INT_CNTL, 0);
  6314. /* SRBM */
  6315. WREG32(SRBM_INT_CNTL, 0);
  6316. /* vline/vblank, etc. */
  6317. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
  6318. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
  6319. if (rdev->num_crtc >= 4) {
  6320. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
  6321. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
  6322. }
  6323. if (rdev->num_crtc >= 6) {
  6324. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
  6325. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
  6326. }
  6327. /* pflip */
  6328. if (rdev->num_crtc >= 2) {
  6329. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
  6330. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
  6331. }
  6332. if (rdev->num_crtc >= 4) {
  6333. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
  6334. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
  6335. }
  6336. if (rdev->num_crtc >= 6) {
  6337. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
  6338. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
  6339. }
  6340. /* dac hotplug */
  6341. WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
  6342. /* digital hotplug */
  6343. tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6344. WREG32(DC_HPD1_INT_CONTROL, tmp);
  6345. tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6346. WREG32(DC_HPD2_INT_CONTROL, tmp);
  6347. tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6348. WREG32(DC_HPD3_INT_CONTROL, tmp);
  6349. tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6350. WREG32(DC_HPD4_INT_CONTROL, tmp);
  6351. tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6352. WREG32(DC_HPD5_INT_CONTROL, tmp);
  6353. tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
  6354. WREG32(DC_HPD6_INT_CONTROL, tmp);
  6355. }
  6356. /**
  6357. * cik_irq_init - init and enable the interrupt ring
  6358. *
  6359. * @rdev: radeon_device pointer
  6360. *
  6361. * Allocate a ring buffer for the interrupt controller,
6362. * enable the RLC, disable interrupts, then set up and
6363. * enable the IH ring buffer (CIK).
6364. * Called at device load and resume.
  6365. * Returns 0 for success, errors for failure.
  6366. */
  6367. static int cik_irq_init(struct radeon_device *rdev)
  6368. {
  6369. int ret = 0;
  6370. int rb_bufsz;
  6371. u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
  6372. /* allocate ring */
  6373. ret = r600_ih_ring_alloc(rdev);
  6374. if (ret)
  6375. return ret;
  6376. /* disable irqs */
  6377. cik_disable_interrupts(rdev);
  6378. /* init rlc */
  6379. ret = cik_rlc_resume(rdev);
  6380. if (ret) {
  6381. r600_ih_ring_fini(rdev);
  6382. return ret;
  6383. }
  6384. /* setup interrupt control */
  6385. /* XXX this should actually be a bus address, not an MC address. same on older asics */
  6386. WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
  6387. interrupt_cntl = RREG32(INTERRUPT_CNTL);
  6388. /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
  6389. * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
  6390. */
  6391. interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
  6392. /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
  6393. interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
  6394. WREG32(INTERRUPT_CNTL, interrupt_cntl);
  6395. WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
  6396. rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
  6397. ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
  6398. IH_WPTR_OVERFLOW_CLEAR |
  6399. (rb_bufsz << 1));
  6400. if (rdev->wb.enabled)
  6401. ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
  6402. /* set the writeback address whether it's enabled or not */
  6403. WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
  6404. WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
  6405. WREG32(IH_RB_CNTL, ih_rb_cntl);
  6406. /* set rptr, wptr to 0 */
  6407. WREG32(IH_RB_RPTR, 0);
  6408. WREG32(IH_RB_WPTR, 0);
  6409. /* Default settings for IH_CNTL (disabled at first) */
  6410. ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
  6411. /* RPTR_REARM only works if msi's are enabled */
  6412. if (rdev->msi_enabled)
  6413. ih_cntl |= RPTR_REARM;
  6414. WREG32(IH_CNTL, ih_cntl);
  6415. /* force the active interrupt state to all disabled */
  6416. cik_disable_interrupt_state(rdev);
  6417. pci_set_master(rdev->pdev);
  6418. /* enable irqs */
  6419. cik_enable_interrupts(rdev);
  6420. return ret;
  6421. }
  6422. /**
  6423. * cik_irq_set - enable/disable interrupt sources
  6424. *
  6425. * @rdev: radeon_device pointer
  6426. *
  6427. * Enable interrupt sources on the GPU (vblanks, hpd,
  6428. * etc.) (CIK).
  6429. * Returns 0 for success, errors for failure.
  6430. */
  6431. int cik_irq_set(struct radeon_device *rdev)
  6432. {
  6433. u32 cp_int_cntl;
  6434. u32 cp_m1p0;
  6435. u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
  6436. u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
  6437. u32 grbm_int_cntl = 0;
  6438. u32 dma_cntl, dma_cntl1;
  6439. if (!rdev->irq.installed) {
  6440. WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
  6441. return -EINVAL;
  6442. }
  6443. /* don't enable anything if the ih is disabled */
  6444. if (!rdev->ih.enabled) {
  6445. cik_disable_interrupts(rdev);
  6446. /* force the active interrupt state to all disabled */
  6447. cik_disable_interrupt_state(rdev);
  6448. return 0;
  6449. }
  6450. cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
  6451. (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
  6452. cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
  6453. hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6454. hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6455. hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6456. hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6457. hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6458. hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
  6459. dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6460. dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
  6461. cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
  6462. /* enable CP interrupts on all rings */
  6463. if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
  6464. DRM_DEBUG("cik_irq_set: sw int gfx\n");
  6465. cp_int_cntl |= TIME_STAMP_INT_ENABLE;
  6466. }
  6467. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
  6468. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  6469. DRM_DEBUG("si_irq_set: sw int cp1\n");
  6470. if (ring->me == 1) {
  6471. switch (ring->pipe) {
  6472. case 0:
  6473. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  6474. break;
  6475. default:
  6476. DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
  6477. break;
  6478. }
  6479. } else {
  6480. DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
  6481. }
  6482. }
  6483. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
  6484. struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  6485. DRM_DEBUG("si_irq_set: sw int cp2\n");
  6486. if (ring->me == 1) {
  6487. switch (ring->pipe) {
  6488. case 0:
  6489. cp_m1p0 |= TIME_STAMP_INT_ENABLE;
  6490. break;
  6491. default:
  6492. DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
  6493. break;
  6494. }
  6495. } else {
  6496. DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
  6497. }
  6498. }
  6499. if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
  6500. DRM_DEBUG("cik_irq_set: sw int dma\n");
  6501. dma_cntl |= TRAP_ENABLE;
  6502. }
  6503. if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
  6504. DRM_DEBUG("cik_irq_set: sw int dma1\n");
  6505. dma_cntl1 |= TRAP_ENABLE;
  6506. }
  6507. if (rdev->irq.crtc_vblank_int[0] ||
  6508. atomic_read(&rdev->irq.pflip[0])) {
  6509. DRM_DEBUG("cik_irq_set: vblank 0\n");
  6510. crtc1 |= VBLANK_INTERRUPT_MASK;
  6511. }
  6512. if (rdev->irq.crtc_vblank_int[1] ||
  6513. atomic_read(&rdev->irq.pflip[1])) {
  6514. DRM_DEBUG("cik_irq_set: vblank 1\n");
  6515. crtc2 |= VBLANK_INTERRUPT_MASK;
  6516. }
  6517. if (rdev->irq.crtc_vblank_int[2] ||
  6518. atomic_read(&rdev->irq.pflip[2])) {
  6519. DRM_DEBUG("cik_irq_set: vblank 2\n");
  6520. crtc3 |= VBLANK_INTERRUPT_MASK;
  6521. }
  6522. if (rdev->irq.crtc_vblank_int[3] ||
  6523. atomic_read(&rdev->irq.pflip[3])) {
  6524. DRM_DEBUG("cik_irq_set: vblank 3\n");
  6525. crtc4 |= VBLANK_INTERRUPT_MASK;
  6526. }
  6527. if (rdev->irq.crtc_vblank_int[4] ||
  6528. atomic_read(&rdev->irq.pflip[4])) {
  6529. DRM_DEBUG("cik_irq_set: vblank 4\n");
  6530. crtc5 |= VBLANK_INTERRUPT_MASK;
  6531. }
  6532. if (rdev->irq.crtc_vblank_int[5] ||
  6533. atomic_read(&rdev->irq.pflip[5])) {
  6534. DRM_DEBUG("cik_irq_set: vblank 5\n");
  6535. crtc6 |= VBLANK_INTERRUPT_MASK;
  6536. }
  6537. if (rdev->irq.hpd[0]) {
  6538. DRM_DEBUG("cik_irq_set: hpd 1\n");
  6539. hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6540. }
  6541. if (rdev->irq.hpd[1]) {
  6542. DRM_DEBUG("cik_irq_set: hpd 2\n");
  6543. hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6544. }
  6545. if (rdev->irq.hpd[2]) {
  6546. DRM_DEBUG("cik_irq_set: hpd 3\n");
  6547. hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6548. }
  6549. if (rdev->irq.hpd[3]) {
  6550. DRM_DEBUG("cik_irq_set: hpd 4\n");
  6551. hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6552. }
  6553. if (rdev->irq.hpd[4]) {
  6554. DRM_DEBUG("cik_irq_set: hpd 5\n");
  6555. hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6556. }
  6557. if (rdev->irq.hpd[5]) {
  6558. DRM_DEBUG("cik_irq_set: hpd 6\n");
  6559. hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
  6560. }
  6561. WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
  6562. WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
  6563. WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
  6564. WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
  6565. WREG32(GRBM_INT_CNTL, grbm_int_cntl);
  6566. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
  6567. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
  6568. if (rdev->num_crtc >= 4) {
  6569. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
  6570. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
  6571. }
  6572. if (rdev->num_crtc >= 6) {
  6573. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
  6574. WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
  6575. }
  6576. if (rdev->num_crtc >= 2) {
  6577. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
  6578. GRPH_PFLIP_INT_MASK);
  6579. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
  6580. GRPH_PFLIP_INT_MASK);
  6581. }
  6582. if (rdev->num_crtc >= 4) {
  6583. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
  6584. GRPH_PFLIP_INT_MASK);
  6585. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
  6586. GRPH_PFLIP_INT_MASK);
  6587. }
  6588. if (rdev->num_crtc >= 6) {
  6589. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
  6590. GRPH_PFLIP_INT_MASK);
  6591. WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
  6592. GRPH_PFLIP_INT_MASK);
  6593. }
  6594. WREG32(DC_HPD1_INT_CONTROL, hpd1);
  6595. WREG32(DC_HPD2_INT_CONTROL, hpd2);
  6596. WREG32(DC_HPD3_INT_CONTROL, hpd3);
  6597. WREG32(DC_HPD4_INT_CONTROL, hpd4);
  6598. WREG32(DC_HPD5_INT_CONTROL, hpd5);
  6599. WREG32(DC_HPD6_INT_CONTROL, hpd6);
  6600. /* posting read */
  6601. RREG32(SRBM_STATUS);
  6602. return 0;
  6603. }
  6604. /**
  6605. * cik_irq_ack - ack interrupt sources
  6606. *
  6607. * @rdev: radeon_device pointer
  6608. *
  6609. * Ack interrupt sources on the GPU (vblanks, hpd,
  6610. * etc.) (CIK). Certain interrupt sources are sw
  6611. * generated and do not require an explicit ack.
  6612. */
  6613. static inline void cik_irq_ack(struct radeon_device *rdev)
  6614. {
  6615. u32 tmp;
  6616. rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
  6617. rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
  6618. rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
  6619. rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
  6620. rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
  6621. rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
  6622. rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
  6623. rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
  6624. EVERGREEN_CRTC0_REGISTER_OFFSET);
  6625. rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
  6626. EVERGREEN_CRTC1_REGISTER_OFFSET);
  6627. if (rdev->num_crtc >= 4) {
  6628. rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
  6629. EVERGREEN_CRTC2_REGISTER_OFFSET);
  6630. rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
  6631. EVERGREEN_CRTC3_REGISTER_OFFSET);
  6632. }
  6633. if (rdev->num_crtc >= 6) {
  6634. rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
  6635. EVERGREEN_CRTC4_REGISTER_OFFSET);
  6636. rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
  6637. EVERGREEN_CRTC5_REGISTER_OFFSET);
  6638. }
  6639. if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
  6640. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
  6641. GRPH_PFLIP_INT_CLEAR);
  6642. if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
  6643. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
  6644. GRPH_PFLIP_INT_CLEAR);
  6645. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
  6646. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
  6647. if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
  6648. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
  6649. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
  6650. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
  6651. if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
  6652. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
  6653. if (rdev->num_crtc >= 4) {
  6654. if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
  6655. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
  6656. GRPH_PFLIP_INT_CLEAR);
  6657. if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
  6658. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
  6659. GRPH_PFLIP_INT_CLEAR);
  6660. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
  6661. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
  6662. if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
  6663. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
  6664. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
  6665. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
  6666. if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
  6667. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
  6668. }
  6669. if (rdev->num_crtc >= 6) {
  6670. if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
  6671. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
  6672. GRPH_PFLIP_INT_CLEAR);
  6673. if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
  6674. WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
  6675. GRPH_PFLIP_INT_CLEAR);
  6676. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
  6677. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
  6678. if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
  6679. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
  6680. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
  6681. WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
  6682. if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
  6683. WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
  6684. }
  6685. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
  6686. tmp = RREG32(DC_HPD1_INT_CONTROL);
  6687. tmp |= DC_HPDx_INT_ACK;
  6688. WREG32(DC_HPD1_INT_CONTROL, tmp);
  6689. }
  6690. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
  6691. tmp = RREG32(DC_HPD2_INT_CONTROL);
  6692. tmp |= DC_HPDx_INT_ACK;
  6693. WREG32(DC_HPD2_INT_CONTROL, tmp);
  6694. }
  6695. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
  6696. tmp = RREG32(DC_HPD3_INT_CONTROL);
  6697. tmp |= DC_HPDx_INT_ACK;
  6698. WREG32(DC_HPD3_INT_CONTROL, tmp);
  6699. }
  6700. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
  6701. tmp = RREG32(DC_HPD4_INT_CONTROL);
  6702. tmp |= DC_HPDx_INT_ACK;
  6703. WREG32(DC_HPD4_INT_CONTROL, tmp);
  6704. }
  6705. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
  6706. tmp = RREG32(DC_HPD5_INT_CONTROL);
  6707. tmp |= DC_HPDx_INT_ACK;
  6708. WREG32(DC_HPD5_INT_CONTROL, tmp);
  6709. }
  6710. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
  6711. tmp = RREG32(DC_HPD6_INT_CONTROL);
  6712. tmp |= DC_HPDx_INT_ACK;
  6713. WREG32(DC_HPD6_INT_CONTROL, tmp);
  6714. }
  6715. if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
  6716. tmp = RREG32(DC_HPD1_INT_CONTROL);
  6717. tmp |= DC_HPDx_RX_INT_ACK;
  6718. WREG32(DC_HPD1_INT_CONTROL, tmp);
  6719. }
  6720. if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
  6721. tmp = RREG32(DC_HPD2_INT_CONTROL);
  6722. tmp |= DC_HPDx_RX_INT_ACK;
  6723. WREG32(DC_HPD2_INT_CONTROL, tmp);
  6724. }
  6725. if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
  6726. tmp = RREG32(DC_HPD3_INT_CONTROL);
  6727. tmp |= DC_HPDx_RX_INT_ACK;
  6728. WREG32(DC_HPD3_INT_CONTROL, tmp);
  6729. }
  6730. if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
  6731. tmp = RREG32(DC_HPD4_INT_CONTROL);
  6732. tmp |= DC_HPDx_RX_INT_ACK;
  6733. WREG32(DC_HPD4_INT_CONTROL, tmp);
  6734. }
  6735. if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
  6736. tmp = RREG32(DC_HPD5_INT_CONTROL);
  6737. tmp |= DC_HPDx_RX_INT_ACK;
  6738. WREG32(DC_HPD5_INT_CONTROL, tmp);
  6739. }
  6740. if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
  6741. tmp = RREG32(DC_HPD6_INT_CONTROL);
  6742. tmp |= DC_HPDx_RX_INT_ACK;
  6743. WREG32(DC_HPD6_INT_CONTROL, tmp);
  6744. }
  6745. }
  6746. /**
  6747. * cik_irq_disable - disable interrupts
  6748. *
  6749. * @rdev: radeon_device pointer
  6750. *
  6751. * Disable interrupts on the hw (CIK).
  6752. */
  6753. static void cik_irq_disable(struct radeon_device *rdev)
  6754. {
  6755. cik_disable_interrupts(rdev);
  6756. /* Wait and acknowledge irq */
  6757. mdelay(1);
  6758. cik_irq_ack(rdev);
  6759. cik_disable_interrupt_state(rdev);
  6760. }
  6761. /**
  6762. * cik_irq_suspend - disable interrupts for suspend
  6763. *
  6764. * @rdev: radeon_device pointer
  6765. *
  6766. * Disable interrupts and stop the RLC (CIK).
  6767. * Used for suspend.
  6768. */
  6769. static void cik_irq_suspend(struct radeon_device *rdev)
  6770. {
  6771. cik_irq_disable(rdev);
  6772. cik_rlc_stop(rdev);
  6773. }
  6774. /**
  6775. * cik_irq_fini - tear down interrupt support
  6776. *
  6777. * @rdev: radeon_device pointer
  6778. *
  6779. * Disable interrupts on the hw and free the IH ring
  6780. * buffer (CIK).
  6781. * Used for driver unload.
  6782. */
  6783. static void cik_irq_fini(struct radeon_device *rdev)
  6784. {
  6785. cik_irq_suspend(rdev);
  6786. r600_ih_ring_fini(rdev);
  6787. }
  6788. /**
  6789. * cik_get_ih_wptr - get the IH ring buffer wptr
  6790. *
  6791. * @rdev: radeon_device pointer
  6792. *
  6793. * Get the IH ring buffer wptr from either the register
  6794. * or the writeback memory buffer (CIK). Also check for
  6795. * ring buffer overflow and deal with it.
  6796. * Used by cik_irq_process().
  6797. * Returns the value of the wptr.
  6798. */
  6799. static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
  6800. {
  6801. u32 wptr, tmp;
  6802. if (rdev->wb.enabled)
  6803. wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
  6804. else
  6805. wptr = RREG32(IH_RB_WPTR);
  6806. if (wptr & RB_OVERFLOW) {
  6807. wptr &= ~RB_OVERFLOW;
  6808. /* When a ring buffer overflow happens, start parsing interrupts
  6809. * from the last vector that was not overwritten (wptr + 16). This
  6810. * should allow us to catch up.
  6811. */
  6812. dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
  6813. wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
  6814. rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
  6815. tmp = RREG32(IH_RB_CNTL);
  6816. tmp |= IH_WPTR_OVERFLOW_CLEAR;
  6817. WREG32(IH_RB_CNTL, tmp);
  6818. }
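/* Example of the recovery above (illustrative): with the default 64 KB ring,
 * ptr_mask is 0xFFFF. If the overflow bit was set with wptr == 0x0040,
 * parsing resumes at rptr = (0x0040 + 16) & 0xFFFF = 0x0050, the first
 * vector assumed not to have been overwritten.
 */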
  6819. return (wptr & rdev->ih.ptr_mask);
  6820. }
  6821. /* CIK IV Ring
  6822. * Each IV ring entry is 128 bits:
  6823. * [7:0] - interrupt source id
  6824. * [31:8] - reserved
  6825. * [59:32] - interrupt source data
  6826. * [63:60] - reserved
  6827. * [71:64] - RINGID
  6828. * CP:
  6829. * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
  6830. * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
  6831. * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
  6832. * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
  6833. * PIPE_ID - ME0 0=3D
  6834. * - ME1&2 compute dispatcher (4 pipes each)
  6835. * SDMA:
  6836. * INSTANCE_ID [1:0], QUEUE_ID[1:0]
  6837. * INSTANCE_ID - 0 = sdma0, 1 = sdma1
  6838. * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
  6839. * [79:72] - VMID
  6840. * [95:80] - PASID
  6841. * [127:96] - reserved
  6842. */
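/* Illustrative sketch (not part of the driver): one way the 128-bit IV
 * entry described above could be unpacked. The dword offsets and masks
 * mirror what cik_irq_process() does below; the struct and helper names
 * are hypothetical.
 */
struct cik_iv_entry {
	u8 src_id;	/* [7:0]   - interrupt source id */
	u32 src_data;	/* [59:32] - interrupt source data */
	u8 ring_id;	/* [71:64] - RINGID */
	u8 vmid;	/* [79:72] - VMID */
	u16 pasid;	/* [95:80] - PASID */
};

static inline void cik_iv_entry_decode(const __le32 *dw, struct cik_iv_entry *e)
{
	e->src_id = le32_to_cpu(dw[0]) & 0xff;
	e->src_data = le32_to_cpu(dw[1]) & 0xfffffff;
	e->ring_id = le32_to_cpu(dw[2]) & 0xff;
	e->vmid = (le32_to_cpu(dw[2]) >> 8) & 0xff;
	e->pasid = le32_to_cpu(dw[2]) >> 16;
	/* for CP entries, RINGID further decodes as:
	 * me_id = (ring_id & 0x60) >> 5, pipe_id = (ring_id & 0x18) >> 3,
	 * queue_id = ring_id & 0x7
	 */
}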
  6843. /**
  6844. * cik_irq_process - interrupt handler
  6845. *
  6846. * @rdev: radeon_device pointer
  6847. *
  6848. * Interrupt handler (CIK). Walk the IH ring,
  6849. * ack interrupts and schedule work to handle
  6850. * interrupt events.
  6851. * Returns irq process return code.
  6852. */
  6853. int cik_irq_process(struct radeon_device *rdev)
  6854. {
  6855. struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  6856. struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  6857. u32 wptr;
  6858. u32 rptr;
  6859. u32 src_id, src_data, ring_id;
  6860. u8 me_id, pipe_id, queue_id;
  6861. u32 ring_index;
  6862. bool queue_hotplug = false;
  6863. bool queue_dp = false;
  6864. bool queue_reset = false;
  6865. u32 addr, status, mc_client;
  6866. bool queue_thermal = false;
  6867. if (!rdev->ih.enabled || rdev->shutdown)
  6868. return IRQ_NONE;
  6869. wptr = cik_get_ih_wptr(rdev);
  6870. restart_ih:
  6871. /* is somebody else already processing irqs? */
  6872. if (atomic_xchg(&rdev->ih.lock, 1))
  6873. return IRQ_NONE;
  6874. rptr = rdev->ih.rptr;
  6875. DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
  6876. /* Order reading of wptr vs. reading of IH ring data */
  6877. rmb();
  6878. /* display interrupts */
  6879. cik_irq_ack(rdev);
  6880. while (rptr != wptr) {
  6881. /* wptr/rptr are in bytes! */
  6882. ring_index = rptr / 4;
  6883. radeon_kfd_interrupt(rdev,
  6884. (const void *) &rdev->ih.ring[ring_index]);
  6885. src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
  6886. src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
  6887. ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
  6888. switch (src_id) {
  6889. case 1: /* D1 vblank/vline */
  6890. switch (src_data) {
  6891. case 0: /* D1 vblank */
  6892. if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
  6893. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6894. if (rdev->irq.crtc_vblank_int[0]) {
  6895. drm_handle_vblank(rdev->ddev, 0);
  6896. rdev->pm.vblank_sync = true;
  6897. wake_up(&rdev->irq.vblank_queue);
  6898. }
  6899. if (atomic_read(&rdev->irq.pflip[0]))
  6900. radeon_crtc_handle_vblank(rdev, 0);
  6901. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
  6902. DRM_DEBUG("IH: D1 vblank\n");
  6903. break;
  6904. case 1: /* D1 vline */
  6905. if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
  6906. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6907. rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
  6908. DRM_DEBUG("IH: D1 vline\n");
  6909. break;
  6910. default:
  6911. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6912. break;
  6913. }
  6914. break;
  6915. case 2: /* D2 vblank/vline */
  6916. switch (src_data) {
  6917. case 0: /* D2 vblank */
  6918. if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
  6919. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6920. if (rdev->irq.crtc_vblank_int[1]) {
  6921. drm_handle_vblank(rdev->ddev, 1);
  6922. rdev->pm.vblank_sync = true;
  6923. wake_up(&rdev->irq.vblank_queue);
  6924. }
  6925. if (atomic_read(&rdev->irq.pflip[1]))
  6926. radeon_crtc_handle_vblank(rdev, 1);
  6927. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
  6928. DRM_DEBUG("IH: D2 vblank\n");
  6929. break;
  6930. case 1: /* D2 vline */
  6931. if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
  6932. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6933. rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
  6934. DRM_DEBUG("IH: D2 vline\n");
  6935. break;
  6936. default:
  6937. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6938. break;
  6939. }
  6940. break;
  6941. case 3: /* D3 vblank/vline */
  6942. switch (src_data) {
  6943. case 0: /* D3 vblank */
  6944. if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
  6945. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6946. if (rdev->irq.crtc_vblank_int[2]) {
  6947. drm_handle_vblank(rdev->ddev, 2);
  6948. rdev->pm.vblank_sync = true;
  6949. wake_up(&rdev->irq.vblank_queue);
  6950. }
  6951. if (atomic_read(&rdev->irq.pflip[2]))
  6952. radeon_crtc_handle_vblank(rdev, 2);
  6953. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
  6954. DRM_DEBUG("IH: D3 vblank\n");
  6955. break;
  6956. case 1: /* D3 vline */
  6957. if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
  6958. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6959. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
  6960. DRM_DEBUG("IH: D3 vline\n");
  6961. break;
  6962. default:
  6963. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6964. break;
  6965. }
  6966. break;
  6967. case 4: /* D4 vblank/vline */
  6968. switch (src_data) {
  6969. case 0: /* D4 vblank */
  6970. if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
  6971. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6972. if (rdev->irq.crtc_vblank_int[3]) {
  6973. drm_handle_vblank(rdev->ddev, 3);
  6974. rdev->pm.vblank_sync = true;
  6975. wake_up(&rdev->irq.vblank_queue);
  6976. }
  6977. if (atomic_read(&rdev->irq.pflip[3]))
  6978. radeon_crtc_handle_vblank(rdev, 3);
  6979. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
  6980. DRM_DEBUG("IH: D4 vblank\n");
  6981. break;
  6982. case 1: /* D4 vline */
  6983. if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
  6984. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6985. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
  6986. DRM_DEBUG("IH: D4 vline\n");
  6987. break;
  6988. default:
  6989. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  6990. break;
  6991. }
  6992. break;
  6993. case 5: /* D5 vblank/vline */
  6994. switch (src_data) {
  6995. case 0: /* D5 vblank */
  6996. if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
  6997. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  6998. if (rdev->irq.crtc_vblank_int[4]) {
  6999. drm_handle_vblank(rdev->ddev, 4);
  7000. rdev->pm.vblank_sync = true;
  7001. wake_up(&rdev->irq.vblank_queue);
  7002. }
  7003. if (atomic_read(&rdev->irq.pflip[4]))
  7004. radeon_crtc_handle_vblank(rdev, 4);
  7005. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
  7006. DRM_DEBUG("IH: D5 vblank\n");
  7007. break;
  7008. case 1: /* D5 vline */
  7009. if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
  7010. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7011. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
  7012. DRM_DEBUG("IH: D5 vline\n");
  7013. break;
  7014. default:
  7015. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  7016. break;
  7017. }
  7018. break;
  7019. case 6: /* D6 vblank/vline */
  7020. switch (src_data) {
  7021. case 0: /* D6 vblank */
  7022. if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
  7023. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7024. if (rdev->irq.crtc_vblank_int[5]) {
  7025. drm_handle_vblank(rdev->ddev, 5);
  7026. rdev->pm.vblank_sync = true;
  7027. wake_up(&rdev->irq.vblank_queue);
  7028. }
  7029. if (atomic_read(&rdev->irq.pflip[5]))
  7030. radeon_crtc_handle_vblank(rdev, 5);
  7031. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
  7032. DRM_DEBUG("IH: D6 vblank\n");
  7033. break;
  7034. case 1: /* D6 vline */
  7035. if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
  7036. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7037. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
  7038. DRM_DEBUG("IH: D6 vline\n");
  7039. break;
  7040. default:
  7041. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  7042. break;
  7043. }
  7044. break;
  7045. case 8: /* D1 page flip */
  7046. case 10: /* D2 page flip */
  7047. case 12: /* D3 page flip */
  7048. case 14: /* D4 page flip */
  7049. case 16: /* D5 page flip */
  7050. case 18: /* D6 page flip */
  7051. DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
  7052. if (radeon_use_pflipirq > 0)
  7053. radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
  7054. break;
  7055. case 42: /* HPD hotplug */
  7056. switch (src_data) {
  7057. case 0:
  7058. if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
  7059. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7060. rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
  7061. queue_hotplug = true;
  7062. DRM_DEBUG("IH: HPD1\n");
  7063. break;
  7064. case 1:
  7065. if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
  7066. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7067. rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
  7068. queue_hotplug = true;
  7069. DRM_DEBUG("IH: HPD2\n");
  7070. break;
  7071. case 2:
  7072. if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
  7073. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7074. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
  7075. queue_hotplug = true;
  7076. DRM_DEBUG("IH: HPD3\n");
  7077. break;
  7078. case 3:
  7079. if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
  7080. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7081. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
  7082. queue_hotplug = true;
  7083. DRM_DEBUG("IH: HPD4\n");
  7084. break;
  7085. case 4:
  7086. if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
  7087. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7088. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
  7089. queue_hotplug = true;
  7090. DRM_DEBUG("IH: HPD5\n");
  7091. break;
  7092. case 5:
  7093. if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
  7094. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7095. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
  7096. queue_hotplug = true;
  7097. DRM_DEBUG("IH: HPD6\n");
  7098. break;
  7099. case 6:
  7100. if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
  7101. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7102. rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
  7103. queue_dp = true;
  7104. DRM_DEBUG("IH: HPD_RX 1\n");
  7105. break;
  7106. case 7:
  7107. if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
  7108. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7109. rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
  7110. queue_dp = true;
  7111. DRM_DEBUG("IH: HPD_RX 2\n");
  7112. break;
  7113. case 8:
  7114. if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
  7115. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7116. rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
  7117. queue_dp = true;
  7118. DRM_DEBUG("IH: HPD_RX 3\n");
  7119. break;
  7120. case 9:
  7121. if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
  7122. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7123. rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
  7124. queue_dp = true;
  7125. DRM_DEBUG("IH: HPD_RX 4\n");
  7126. break;
  7127. case 10:
  7128. if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
  7129. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7130. rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
  7131. queue_dp = true;
  7132. DRM_DEBUG("IH: HPD_RX 5\n");
  7133. break;
  7134. case 11:
  7135. if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
  7136. DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
  7137. rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
  7138. queue_dp = true;
  7139. DRM_DEBUG("IH: HPD_RX 6\n");
  7140. break;
  7141. default:
  7142. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  7143. break;
  7144. }
  7145. break;
  7146. case 96:
  7147. DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
  7148. WREG32(SRBM_INT_ACK, 0x1);
  7149. break;
  7150. case 124: /* UVD */
  7151. DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
  7152. radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
  7153. break;
  7154. case 146:
  7155. case 147:
  7156. addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
  7157. status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
  7158. mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
  7159. /* reset addr and status */
  7160. WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
  7161. if (addr == 0x0 && status == 0x0)
  7162. break;
  7163. dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
  7164. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
  7165. addr);
  7166. dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
  7167. status);
  7168. cik_vm_decode_fault(rdev, status, addr, mc_client);
  7169. break;
  7170. case 167: /* VCE */
  7171. DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
  7172. switch (src_data) {
  7173. case 0:
  7174. radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
  7175. break;
  7176. case 1:
  7177. radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
  7178. break;
  7179. default:
  7180. DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
  7181. break;
  7182. }
  7183. break;
  7184. case 176: /* GFX RB CP_INT */
  7185. case 177: /* GFX IB CP_INT */
  7186. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  7187. break;
  7188. case 181: /* CP EOP event */
  7189. DRM_DEBUG("IH: CP EOP\n");
  7190. /* XXX check the bitfield order! */
  7191. me_id = (ring_id & 0x60) >> 5;
  7192. pipe_id = (ring_id & 0x18) >> 3;
  7193. queue_id = (ring_id & 0x7) >> 0;
  7194. switch (me_id) {
  7195. case 0:
  7196. radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
  7197. break;
  7198. case 1:
  7199. case 2:
  7200. if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
  7201. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
  7202. if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
  7203. radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  7204. break;
  7205. }
  7206. break;
  7207. case 184: /* CP Privileged reg access */
  7208. DRM_ERROR("Illegal register access in command stream\n");
  7209. /* XXX check the bitfield order! */
  7210. me_id = (ring_id & 0x60) >> 5;
  7211. pipe_id = (ring_id & 0x18) >> 3;
  7212. queue_id = (ring_id & 0x7) >> 0;
  7213. switch (me_id) {
  7214. case 0:
  7215. /* This results in a full GPU reset, but all we need to do is soft
  7216. * reset the CP for gfx
  7217. */
  7218. queue_reset = true;
  7219. break;
  7220. case 1:
  7221. /* XXX compute */
  7222. queue_reset = true;
  7223. break;
  7224. case 2:
  7225. /* XXX compute */
  7226. queue_reset = true;
  7227. break;
  7228. }
  7229. break;
  7230. case 185: /* CP Privileged inst */
  7231. DRM_ERROR("Illegal instruction in command stream\n");
  7232. /* XXX check the bitfield order! */
  7233. me_id = (ring_id & 0x60) >> 5;
  7234. pipe_id = (ring_id & 0x18) >> 3;
  7235. queue_id = (ring_id & 0x7) >> 0;
  7236. switch (me_id) {
  7237. case 0:
  7238. /* This results in a full GPU reset, but all we need to do is soft
  7239. * reset the CP for gfx
  7240. */
  7241. queue_reset = true;
  7242. break;
  7243. case 1:
  7244. /* XXX compute */
  7245. queue_reset = true;
  7246. break;
  7247. case 2:
  7248. /* XXX compute */
  7249. queue_reset = true;
  7250. break;
  7251. }
  7252. break;
  7253. case 224: /* SDMA trap event */
  7254. /* XXX check the bitfield order! */
  7255. me_id = (ring_id & 0x3) >> 0;
  7256. queue_id = (ring_id & 0xc) >> 2;
  7257. DRM_DEBUG("IH: SDMA trap\n");
  7258. switch (me_id) {
  7259. case 0:
  7260. switch (queue_id) {
  7261. case 0:
  7262. radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
  7263. break;
  7264. case 1:
  7265. /* XXX compute */
  7266. break;
  7267. case 2:
  7268. /* XXX compute */
  7269. break;
  7270. }
  7271. break;
  7272. case 1:
  7273. switch (queue_id) {
  7274. case 0:
  7275. radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  7276. break;
  7277. case 1:
  7278. /* XXX compute */
  7279. break;
  7280. case 2:
  7281. /* XXX compute */
  7282. break;
  7283. }
  7284. break;
  7285. }
  7286. break;
  7287. case 230: /* thermal low to high */
  7288. DRM_DEBUG("IH: thermal low to high\n");
  7289. rdev->pm.dpm.thermal.high_to_low = false;
  7290. queue_thermal = true;
  7291. break;
  7292. case 231: /* thermal high to low */
  7293. DRM_DEBUG("IH: thermal high to low\n");
  7294. rdev->pm.dpm.thermal.high_to_low = true;
  7295. queue_thermal = true;
  7296. break;
  7297. case 233: /* GUI IDLE */
  7298. DRM_DEBUG("IH: GUI idle\n");
  7299. break;
  7300. case 241: /* SDMA Privileged inst */
  7301. case 247: /* SDMA Privileged inst */
  7302. DRM_ERROR("Illegal instruction in SDMA command stream\n");
  7303. /* XXX check the bitfield order! */
  7304. me_id = (ring_id & 0x3) >> 0;
  7305. queue_id = (ring_id & 0xc) >> 2;
  7306. switch (me_id) {
  7307. case 0:
  7308. switch (queue_id) {
  7309. case 0:
  7310. queue_reset = true;
  7311. break;
  7312. case 1:
  7313. /* XXX compute */
  7314. queue_reset = true;
  7315. break;
  7316. case 2:
  7317. /* XXX compute */
  7318. queue_reset = true;
  7319. break;
  7320. }
  7321. break;
  7322. case 1:
  7323. switch (queue_id) {
  7324. case 0:
  7325. queue_reset = true;
  7326. break;
  7327. case 1:
  7328. /* XXX compute */
  7329. queue_reset = true;
  7330. break;
  7331. case 2:
  7332. /* XXX compute */
  7333. queue_reset = true;
  7334. break;
  7335. }
  7336. break;
  7337. }
  7338. break;
  7339. default:
  7340. DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
  7341. break;
  7342. }
  7343. /* wptr/rptr are in bytes! */
  7344. rptr += 16;
  7345. rptr &= rdev->ih.ptr_mask;
  7346. WREG32(IH_RB_RPTR, rptr);
  7347. }
  7348. if (queue_dp)
  7349. schedule_work(&rdev->dp_work);
  7350. if (queue_hotplug)
  7351. schedule_delayed_work(&rdev->hotplug_work, 0);
  7352. if (queue_reset) {
  7353. rdev->needs_reset = true;
  7354. wake_up_all(&rdev->fence_queue);
  7355. }
  7356. if (queue_thermal)
  7357. schedule_work(&rdev->pm.dpm.thermal.work);
  7358. rdev->ih.rptr = rptr;
  7359. atomic_set(&rdev->ih.lock, 0);
  7360. /* make sure wptr hasn't changed while processing */
  7361. wptr = cik_get_ih_wptr(rdev);
  7362. if (wptr != rptr)
  7363. goto restart_ih;
  7364. return IRQ_HANDLED;
  7365. }
  7366. /*
  7367. * startup/shutdown callbacks
  7368. */
  7369. /**
  7370. * cik_startup - program the asic to a functional state
  7371. *
  7372. * @rdev: radeon_device pointer
  7373. *
  7374. * Programs the asic to a functional state (CIK).
  7375. * Called by cik_init() and cik_resume().
  7376. * Returns 0 for success, error for failure.
  7377. */
  7378. static int cik_startup(struct radeon_device *rdev)
  7379. {
  7380. struct radeon_ring *ring;
  7381. u32 nop;
  7382. int r;
  7383. /* enable pcie gen2/3 link */
  7384. cik_pcie_gen3_enable(rdev);
  7385. /* enable aspm */
  7386. cik_program_aspm(rdev);
  7387. /* scratch needs to be initialized before MC */
  7388. r = r600_vram_scratch_init(rdev);
  7389. if (r)
  7390. return r;
  7391. cik_mc_program(rdev);
  7392. if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
  7393. r = ci_mc_load_microcode(rdev);
  7394. if (r) {
  7395. DRM_ERROR("Failed to load MC firmware!\n");
  7396. return r;
  7397. }
  7398. }
  7399. r = cik_pcie_gart_enable(rdev);
  7400. if (r)
  7401. return r;
  7402. cik_gpu_init(rdev);
  7403. /* allocate rlc buffers */
  7404. if (rdev->flags & RADEON_IS_IGP) {
  7405. if (rdev->family == CHIP_KAVERI) {
  7406. rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
  7407. rdev->rlc.reg_list_size =
  7408. (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
  7409. } else {
  7410. rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
  7411. rdev->rlc.reg_list_size =
  7412. (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
  7413. }
  7414. }
  7415. rdev->rlc.cs_data = ci_cs_data;
  7416. rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
  7417. r = sumo_rlc_init(rdev);
  7418. if (r) {
  7419. DRM_ERROR("Failed to init rlc BOs!\n");
  7420. return r;
  7421. }
  7422. /* allocate wb buffer */
  7423. r = radeon_wb_init(rdev);
  7424. if (r)
  7425. return r;
  7426. /* allocate mec buffers */
  7427. r = cik_mec_init(rdev);
  7428. if (r) {
  7429. DRM_ERROR("Failed to init MEC BOs!\n");
  7430. return r;
  7431. }
  7432. r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
  7433. if (r) {
  7434. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  7435. return r;
  7436. }
  7437. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
  7438. if (r) {
  7439. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  7440. return r;
  7441. }
  7442. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
  7443. if (r) {
  7444. dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
  7445. return r;
  7446. }
  7447. r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
  7448. if (r) {
  7449. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  7450. return r;
  7451. }
  7452. r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
  7453. if (r) {
  7454. dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
  7455. return r;
  7456. }
  7457. r = radeon_uvd_resume(rdev);
  7458. if (!r) {
  7459. r = uvd_v4_2_resume(rdev);
  7460. if (!r) {
  7461. r = radeon_fence_driver_start_ring(rdev,
  7462. R600_RING_TYPE_UVD_INDEX);
  7463. if (r)
  7464. dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
  7465. }
  7466. }
  7467. if (r)
  7468. rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
  7469. r = radeon_vce_resume(rdev);
  7470. if (!r) {
  7471. r = vce_v2_0_resume(rdev);
  7472. if (!r)
  7473. r = radeon_fence_driver_start_ring(rdev,
  7474. TN_RING_TYPE_VCE1_INDEX);
  7475. if (!r)
  7476. r = radeon_fence_driver_start_ring(rdev,
  7477. TN_RING_TYPE_VCE2_INDEX);
  7478. }
  7479. if (r) {
  7480. dev_err(rdev->dev, "VCE init error (%d).\n", r);
  7481. rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
  7482. rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
  7483. }
  7484. /* Enable IRQ */
  7485. if (!rdev->irq.installed) {
  7486. r = radeon_irq_kms_init(rdev);
  7487. if (r)
  7488. return r;
  7489. }
  7490. r = cik_irq_init(rdev);
  7491. if (r) {
  7492. DRM_ERROR("radeon: IH init failed (%d).\n", r);
  7493. radeon_irq_kms_fini(rdev);
  7494. return r;
  7495. }
  7496. cik_irq_set(rdev);
  7497. if (rdev->family == CHIP_HAWAII) {
  7498. if (rdev->new_fw)
  7499. nop = PACKET3(PACKET3_NOP, 0x3FFF);
  7500. else
  7501. nop = RADEON_CP_PACKET2;
  7502. } else {
  7503. nop = PACKET3(PACKET3_NOP, 0x3FFF);
  7504. }
  7505. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  7506. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
  7507. nop);
  7508. if (r)
  7509. return r;
  7510. /* set up the compute queues */
  7511. /* type-2 packets are deprecated on MEC, use type-3 instead */
  7512. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  7513. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
  7514. nop);
  7515. if (r)
  7516. return r;
  7517. ring->me = 1; /* first MEC */
  7518. ring->pipe = 0; /* first pipe */
  7519. ring->queue = 0; /* first queue */
  7520. ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
  7521. /* type-2 packets are deprecated on MEC, use type-3 instead */
  7522. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  7523. r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
  7524. nop);
  7525. if (r)
  7526. return r;
  7527. /* dGPUs only have 1 MEC */
  7528. ring->me = 1; /* first MEC */
  7529. ring->pipe = 0; /* first pipe */
  7530. ring->queue = 1; /* second queue */
  7531. ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
  7532. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  7533. r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
  7534. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  7535. if (r)
  7536. return r;
  7537. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  7538. r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
  7539. SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
  7540. if (r)
  7541. return r;
  7542. r = cik_cp_resume(rdev);
  7543. if (r)
  7544. return r;
  7545. r = cik_sdma_resume(rdev);
  7546. if (r)
  7547. return r;
  7548. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  7549. if (ring->ring_size) {
  7550. r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
  7551. RADEON_CP_PACKET2);
  7552. if (!r)
  7553. r = uvd_v1_0_init(rdev);
  7554. if (r)
  7555. DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
  7556. }
  7557. r = -ENOENT;
  7558. ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
  7559. if (ring->ring_size)
  7560. r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
  7561. VCE_CMD_NO_OP);
  7562. ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
  7563. if (ring->ring_size)
  7564. r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
  7565. VCE_CMD_NO_OP);
  7566. if (!r)
  7567. r = vce_v1_0_init(rdev);
  7568. else if (r != -ENOENT)
  7569. DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
  7570. r = radeon_ib_pool_init(rdev);
  7571. if (r) {
  7572. dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
  7573. return r;
  7574. }
  7575. r = radeon_vm_manager_init(rdev);
  7576. if (r) {
  7577. dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
  7578. return r;
  7579. }
  7580. r = radeon_audio_init(rdev);
  7581. if (r)
  7582. return r;
  7583. r = radeon_kfd_resume(rdev);
  7584. if (r)
  7585. return r;
  7586. return 0;
  7587. }
  7588. /**
  7589. * cik_resume - resume the asic to a functional state
  7590. *
  7591. * @rdev: radeon_device pointer
  7592. *
  7593. * Programs the asic to a functional state (CIK).
  7594. * Called at resume.
  7595. * Returns 0 for success, error for failure.
  7596. */
  7597. int cik_resume(struct radeon_device *rdev)
  7598. {
  7599. int r;
  7600. /* post card */
  7601. atom_asic_init(rdev->mode_info.atom_context);
  7602. /* init golden registers */
  7603. cik_init_golden_registers(rdev);
  7604. if (rdev->pm.pm_method == PM_METHOD_DPM)
  7605. radeon_pm_resume(rdev);
  7606. rdev->accel_working = true;
  7607. r = cik_startup(rdev);
  7608. if (r) {
  7609. DRM_ERROR("cik startup failed on resume\n");
  7610. rdev->accel_working = false;
  7611. return r;
  7612. }
  7613. return r;
  7614. }
  7615. /**
  7616. * cik_suspend - suspend the asic
  7617. *
  7618. * @rdev: radeon_device pointer
  7619. *
  7620. * Bring the chip into a state suitable for suspend (CIK).
  7621. * Called at suspend.
  7622. * Returns 0 for success.
  7623. */
  7624. int cik_suspend(struct radeon_device *rdev)
  7625. {
  7626. radeon_kfd_suspend(rdev);
  7627. radeon_pm_suspend(rdev);
  7628. radeon_audio_fini(rdev);
  7629. radeon_vm_manager_fini(rdev);
  7630. cik_cp_enable(rdev, false);
  7631. cik_sdma_enable(rdev, false);
  7632. uvd_v1_0_fini(rdev);
  7633. radeon_uvd_suspend(rdev);
  7634. radeon_vce_suspend(rdev);
  7635. cik_fini_pg(rdev);
  7636. cik_fini_cg(rdev);
  7637. cik_irq_suspend(rdev);
  7638. radeon_wb_disable(rdev);
  7639. cik_pcie_gart_disable(rdev);
  7640. return 0;
  7641. }
  7642. /* The plan is to move initialization into this function and use
  7643. * helper functions so that radeon_device_init pretty much does
  7644. * nothing more than call asic-specific functions. This should
  7645. * also allow us to remove a bunch of callback functions
  7646. * like vram_info.
  7647. */
  7648. /**
  7649. * cik_init - asic specific driver and hw init
  7650. *
  7651. * @rdev: radeon_device pointer
  7652. *
  7653. * Setup asic specific driver variables and program the hw
  7654. * to a functional state (CIK).
  7655. * Called at driver startup.
  7656. * Returns 0 for success, errors for failure.
  7657. */
  7658. int cik_init(struct radeon_device *rdev)
  7659. {
  7660. struct radeon_ring *ring;
  7661. int r;
  7662. /* Read BIOS */
  7663. if (!radeon_get_bios(rdev)) {
  7664. if (ASIC_IS_AVIVO(rdev))
  7665. return -EINVAL;
  7666. }
  7667. /* Must be an ATOMBIOS */
  7668. if (!rdev->is_atom_bios) {
  7669. dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
  7670. return -EINVAL;
  7671. }
  7672. r = radeon_atombios_init(rdev);
  7673. if (r)
  7674. return r;
  7675. /* Post card if necessary */
  7676. if (!radeon_card_posted(rdev)) {
  7677. if (!rdev->bios) {
  7678. dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
  7679. return -EINVAL;
  7680. }
  7681. DRM_INFO("GPU not posted. posting now...\n");
  7682. atom_asic_init(rdev->mode_info.atom_context);
  7683. }
  7684. /* init golden registers */
  7685. cik_init_golden_registers(rdev);
  7686. /* Initialize scratch registers */
  7687. cik_scratch_init(rdev);
  7688. /* Initialize surface registers */
  7689. radeon_surface_init(rdev);
  7690. /* Initialize clocks */
  7691. radeon_get_clock_info(rdev->ddev);
  7692. /* Fence driver */
  7693. r = radeon_fence_driver_init(rdev);
  7694. if (r)
  7695. return r;
  7696. /* initialize memory controller */
  7697. r = cik_mc_init(rdev);
  7698. if (r)
  7699. return r;
  7700. /* Memory manager */
  7701. r = radeon_bo_init(rdev);
  7702. if (r)
  7703. return r;
  7704. if (rdev->flags & RADEON_IS_IGP) {
  7705. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  7706. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
  7707. r = cik_init_microcode(rdev);
  7708. if (r) {
  7709. DRM_ERROR("Failed to load firmware!\n");
  7710. return r;
  7711. }
  7712. }
  7713. } else {
  7714. if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
  7715. !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
  7716. !rdev->mc_fw) {
  7717. r = cik_init_microcode(rdev);
  7718. if (r) {
  7719. DRM_ERROR("Failed to load firmware!\n");
  7720. return r;
  7721. }
  7722. }
  7723. }
  7724. /* Initialize power management */
  7725. radeon_pm_init(rdev);
  7726. ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
  7727. ring->ring_obj = NULL;
  7728. r600_ring_init(rdev, ring, 1024 * 1024);
  7729. ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
  7730. ring->ring_obj = NULL;
  7731. r600_ring_init(rdev, ring, 1024 * 1024);
  7732. r = radeon_doorbell_get(rdev, &ring->doorbell_index);
  7733. if (r)
  7734. return r;
  7735. ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
  7736. ring->ring_obj = NULL;
  7737. r600_ring_init(rdev, ring, 1024 * 1024);
  7738. r = radeon_doorbell_get(rdev, &ring->doorbell_index);
  7739. if (r)
  7740. return r;
  7741. ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
  7742. ring->ring_obj = NULL;
  7743. r600_ring_init(rdev, ring, 256 * 1024);
  7744. ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
  7745. ring->ring_obj = NULL;
  7746. r600_ring_init(rdev, ring, 256 * 1024);
  7747. r = radeon_uvd_init(rdev);
  7748. if (!r) {
  7749. ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
  7750. ring->ring_obj = NULL;
  7751. r600_ring_init(rdev, ring, 4096);
  7752. }
  7753. r = radeon_vce_init(rdev);
  7754. if (!r) {
  7755. ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
  7756. ring->ring_obj = NULL;
  7757. r600_ring_init(rdev, ring, 4096);
  7758. ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
  7759. ring->ring_obj = NULL;
  7760. r600_ring_init(rdev, ring, 4096);
  7761. }
  7762. rdev->ih.ring_obj = NULL;
  7763. r600_ih_ring_init(rdev, 64 * 1024);
  7764. r = r600_pcie_gart_init(rdev);
  7765. if (r)
  7766. return r;
  7767. rdev->accel_working = true;
  7768. r = cik_startup(rdev);
  7769. if (r) {
  7770. dev_err(rdev->dev, "disabling GPU acceleration\n");
  7771. cik_cp_fini(rdev);
  7772. cik_sdma_fini(rdev);
  7773. cik_irq_fini(rdev);
  7774. sumo_rlc_fini(rdev);
  7775. cik_mec_fini(rdev);
  7776. radeon_wb_fini(rdev);
  7777. radeon_ib_pool_fini(rdev);
  7778. radeon_vm_manager_fini(rdev);
  7779. radeon_irq_kms_fini(rdev);
  7780. cik_pcie_gart_fini(rdev);
  7781. rdev->accel_working = false;
  7782. }
  7783. /* Don't start up if the MC ucode is missing.
  7784. * The default clocks and voltages before the MC ucode
  7785. * is loaded are not sufficient for advanced operations.
  7786. */
  7787. if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
  7788. DRM_ERROR("radeon: MC ucode required for NI+.\n");
  7789. return -EINVAL;
  7790. }
  7791. return 0;
  7792. }
  7793. /**
  7794. * cik_fini - asic specific driver and hw fini
  7795. *
  7796. * @rdev: radeon_device pointer
  7797. *
  7798. * Tear down the asic specific driver variables and program the hw
  7799. * to an idle state (CIK).
  7800. * Called at driver unload.
  7801. */
  7802. void cik_fini(struct radeon_device *rdev)
  7803. {
  7804. radeon_pm_fini(rdev);
  7805. cik_cp_fini(rdev);
  7806. cik_sdma_fini(rdev);
  7807. cik_fini_pg(rdev);
  7808. cik_fini_cg(rdev);
  7809. cik_irq_fini(rdev);
  7810. sumo_rlc_fini(rdev);
  7811. cik_mec_fini(rdev);
  7812. radeon_wb_fini(rdev);
  7813. radeon_vm_manager_fini(rdev);
  7814. radeon_ib_pool_fini(rdev);
  7815. radeon_irq_kms_fini(rdev);
  7816. uvd_v1_0_fini(rdev);
  7817. radeon_uvd_fini(rdev);
  7818. radeon_vce_fini(rdev);
  7819. cik_pcie_gart_fini(rdev);
  7820. r600_vram_scratch_fini(rdev);
  7821. radeon_gem_fini(rdev);
  7822. radeon_fence_driver_fini(rdev);
  7823. radeon_bo_fini(rdev);
  7824. radeon_atombios_fini(rdev);
  7825. kfree(rdev->bios);
  7826. rdev->bios = NULL;
  7827. }
  7828. void dce8_program_fmt(struct drm_encoder *encoder)
  7829. {
  7830. struct drm_device *dev = encoder->dev;
  7831. struct radeon_device *rdev = dev->dev_private;
  7832. struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
  7833. struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
  7834. struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
  7835. int bpc = 0;
  7836. u32 tmp = 0;
  7837. enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
  7838. if (connector) {
  7839. struct radeon_connector *radeon_connector = to_radeon_connector(connector);
  7840. bpc = radeon_get_monitor_bpc(connector);
  7841. dither = radeon_connector->dither;
  7842. }
  7843. /* LVDS/eDP FMT is set up by atom */
  7844. if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
  7845. return;
  7846. /* not needed for analog */
  7847. if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
  7848. (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
  7849. return;
  7850. if (bpc == 0)
  7851. return;
  7852. switch (bpc) {
  7853. case 6:
  7854. if (dither == RADEON_FMT_DITHER_ENABLE)
  7855. /* XXX sort out optimal dither settings */
  7856. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7857. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
  7858. else
  7859. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
  7860. break;
  7861. case 8:
  7862. if (dither == RADEON_FMT_DITHER_ENABLE)
  7863. /* XXX sort out optimal dither settings */
  7864. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7865. FMT_RGB_RANDOM_ENABLE |
  7866. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
  7867. else
  7868. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
  7869. break;
  7870. case 10:
  7871. if (dither == RADEON_FMT_DITHER_ENABLE)
  7872. /* XXX sort out optimal dither settings */
  7873. tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
  7874. FMT_RGB_RANDOM_ENABLE |
  7875. FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
  7876. else
  7877. tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
  7878. break;
  7879. default:
  7880. /* not needed */
  7881. break;
  7882. }
  7883. WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
  7884. }
  7885. /* display watermark setup */
  7886. /**
  7887. * dce8_line_buffer_adjust - Set up the line buffer
  7888. *
  7889. * @rdev: radeon_device pointer
  7890. * @radeon_crtc: the selected display controller
  7891. * @mode: the current display mode on the selected display
  7892. * controller
  7893. *
  7894. * Set up the line buffer allocation for
  7895. * the selected display controller (CIK).
  7896. * Returns the line buffer size in pixels.
  7897. */
  7898. static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
  7899. struct radeon_crtc *radeon_crtc,
  7900. struct drm_display_mode *mode)
  7901. {
  7902. u32 tmp, buffer_alloc, i;
  7903. u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
  7904. /*
  7905. * Line Buffer Setup
  7906. * There are 6 line buffers, one for each display controller.
  7907. * There are 3 partitions per LB. Select the number of partitions
  7908. * to enable based on the display width. For display widths larger
  7909. * than 4096, you need to use 2 display controllers and combine
  7910. * them using the stereo blender.
  7911. */
  7912. if (radeon_crtc->base.enabled && mode) {
  7913. if (mode->crtc_hdisplay < 1920) {
  7914. tmp = 1;
  7915. buffer_alloc = 2;
  7916. } else if (mode->crtc_hdisplay < 2560) {
  7917. tmp = 2;
  7918. buffer_alloc = 2;
  7919. } else if (mode->crtc_hdisplay < 4096) {
  7920. tmp = 0;
  7921. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  7922. } else {
  7923. DRM_DEBUG_KMS("Mode too big for LB!\n");
  7924. tmp = 0;
  7925. buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
  7926. }
  7927. } else {
  7928. tmp = 1;
  7929. buffer_alloc = 0;
  7930. }
  7931. WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
  7932. LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
  7933. WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
  7934. DMIF_BUFFERS_ALLOCATED(buffer_alloc));
  7935. for (i = 0; i < rdev->usec_timeout; i++) {
  7936. if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
  7937. DMIF_BUFFERS_ALLOCATED_COMPLETED)
  7938. break;
  7939. udelay(1);
  7940. }
  7941. if (radeon_crtc->base.enabled && mode) {
  7942. switch (tmp) {
  7943. case 0:
  7944. default:
  7945. return 4096 * 2;
  7946. case 1:
  7947. return 1920 * 2;
  7948. case 2:
  7949. return 2560 * 2;
  7950. }
  7951. }
  7952. /* controller not enabled, so no lb used */
  7953. return 0;
  7954. }
/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @rdev: radeon_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
struct dce8_wm_params {
	u32 dram_channels;   /* number of dram channels */
	u32 yclk;            /* bandwidth per dram data pin in kHz */
	u32 sclk;            /* engine clock in kHz */
	u32 disp_clk;        /* display clock in kHz */
	u32 src_width;       /* viewport width */
	u32 active_time;     /* active display time in ns */
	u32 blank_time;      /* blank time in ns */
	bool interlaced;     /* mode is interlaced */
	fixed20_12 vsc;      /* vertical scale ratio */
	u32 num_heads;       /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;         /* line buffer allocated to pipe */
	u32 vtaps;           /* vertical scaler taps */
};
/**
 * dce8_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
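/*
 * In other words, dce8_dram_bandwidth() computes
 * (yclk / 1000) * (dram_channels * 4) * 0.7 in 20.12 fixed point.
 * As an illustrative example (made-up numbers): yclk = 1000000 kHz and
 * 2 channels give 1000 * 8 * 0.7 = 5600 MBytes/s of raw DRAM bandwidth.
 */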
/**
 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
/**
 * dce8_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
/**
 * dce8_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
/**
 * dce8_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce8_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
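/*
 * The available bandwidth is therefore limited by whichever of the three
 * paths is slowest: raw DRAM ((yclk / 1000) * channels * 4 * 0.7), data
 * return (32 * (sclk / 1000) * 0.8) and DMIF requests
 * (32 * (disp_clk / 1000) * 0.8). Continuing the made-up example above:
 * sclk = 800000 kHz gives 32 * 800 * 0.8 = 20480 MBytes/s, a 148500 kHz
 * pixel clock gives 32 * 148.5 * 0.8 = 3801 MBytes/s, so the minimum of
 * (5600, 20480, 3801) is 3801 MBytes/s.
 */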
/**
 * dce8_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
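/*
 * This is just src_width * bytes_per_pixel * vsc divided by the line time
 * in microseconds. Rough illustrative numbers: a 1920 pixel wide scanout at
 * 4 bytes per pixel, no vertical scaling (vsc = 1) and a ~13.2 us line time
 * needs about 1920 * 4 / 13.2 ~= 581 MBytes/s on average.
 */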
/**
 * dce8_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce8_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);
	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);
	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
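/*
 * The returned watermark is the memory latency (2000 ns), plus the DC pipe
 * latency (40000000 / disp_clk), plus the time other heads may occupy the
 * data return path, plus any shortfall when the line buffer cannot be
 * refilled within the active time. Illustrative numbers (single head,
 * 148500 kHz pixel clock, 3801 MBytes/s available bandwidth):
 * worst_chunk_return_time = 512 * 8 * 1000 / 3801 = 1077 ns,
 * cursor_line_pair_return_time = 128 * 4 * 1000 / 3801 = 134 ns,
 * dc_latency = 269 ns, so the base latency is roughly
 * 2000 + (2 * 1077 + 134) + 269 = 4557 ns.
 */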
/**
 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce8_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce8_average_bandwidth(wm) <=
	    (dce8_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce8_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce8_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
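/*
 * Rough illustrative numbers for the check above: with an 8192 pixel line
 * buffer and a 1920 pixel wide source, lb_partitions = 4; with no vertical
 * scaling (vsc <= 1, vtaps = 1) that allows latency_tolerant_lines = 2, so
 * with a ~13200 ns line time and ~1680 ns blank time the display can hide
 * roughly 2 * 13200 + 1680 = 28080 ns of latency, comfortably above the
 * ~4557 ns watermark from the example above.
 */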
/**
 * dce8_program_watermarks - program display watermarks
 *
 * @rdev: radeon_device pointer
 * @radeon_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce8_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_high.yclk =
				radeon_dpm_get_mclk(rdev, false) * 10;
			wm_high.sclk =
				radeon_dpm_get_sclk(rdev, false) * 10;
		} else {
			wm_high.yclk = rdev->pm.current_mclk * 10;
			wm_high.sclk = rdev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = radeon_crtc->vsc;
		wm_high.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce8_check_latency_hiding(&wm_high) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled) {
			wm_low.yclk =
				radeon_dpm_get_mclk(rdev, true) * 10;
			wm_low.sclk =
				radeon_dpm_get_sclk(rdev, true) * 10;
		} else {
			wm_low.yclk = rdev->pm.current_mclk * 10;
			wm_low.sclk = rdev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = radeon_crtc->vsc;
		wm_low.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce8_check_latency_hiding(&wm_low) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* Save number of lines the linebuffer leads before the scanout */
		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	radeon_crtc->line_time = line_time;
	radeon_crtc->wm_high = latency_watermark_a;
	radeon_crtc->wm_low = latency_watermark_b;
}
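/*
 * Illustrative timing numbers for dce8_program_watermarks() (standard
 * 1920x1080@60 CEA timing, not from any specific panel): mode->clock =
 * 148500 kHz gives pixel_period = 1000000 / 148500 = 6 ns (integer
 * division), so line_time = 2200 * 6 = 13200 ns, active_time =
 * 1920 * 6 = 11520 ns and blank_time = 1680 ns. Watermark A is then
 * programmed from the high-clock parameters and watermark B from the
 * low-clock parameters.
 */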
/**
 * dce8_bandwidth_update - program display watermarks
 *
 * @rdev: radeon_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
void dce8_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!rdev->mode_info.mode_config_initialized)
		return;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i++) {
		mode = &rdev->mode_info.crtcs[i]->base.mode;
		lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
		dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
	}
}
/**
 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
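/*
 * Note: the write to RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running
 * counter so that the two 32-bit reads that follow it form a consistent
 * 64-bit snapshot, and rdev->gpu_clock_mutex serializes concurrent callers
 * so one caller cannot re-latch the counter between another caller's LSB
 * and MSB reads.
 */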
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;

	r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		return r;

	r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	return r;
}
int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(CG_ECLK_CNTL);
	tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(CG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (pci_is_root_bus(rdev->pdev->bus))
		return;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
static void cik_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	/* XXX double check IGPs */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_P_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			if (!disable_clkreq &&
			    !pci_is_root_bus(rdev->pdev->bus)) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32_SMC(THM_CLK_CNTL, data);

				orig = data = RREG32_SMC(MISC_CLK_CTRL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32_SMC(MISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);

	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}