intel_pm.c

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
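
/*
 * Illustrative sketch (not part of the original file): a platform's RC6
 * policy is expressed as a bitmask of the flags above. A hypothetical
 * helper allowing normal and deep RC6, but not the deepest state, would
 * build its mask as:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * and the enabling code would then test individual bits, e.g.
 * (rc6_mask & INTEL_RC6p_ENABLE), before programming the deeper state.
 */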
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter
 */
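
/*
 * Usage note (editor's addition, inferred from the checks in
 * intel_update_fbc() below): a negative i915.enable_fbc keeps the per-chip
 * default, 0 disables FBC and a positive value enables it, e.g.:
 *
 *	modprobe i915 enable_fbc=1
 */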
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
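
/*
 * Editor's note on the write sequence below (inferred from the code):
 * GEN6_BLITTER_ECOSKPD behaves like the masked i915 registers, where the
 * bits at GEN6_BLITTER_LOCK_SHIFT act as a write-enable mask for the low
 * bits. The function therefore (1) writes the mask bit to unlock
 * FBC_NOTIFY, (2) sets FBC_NOTIFY itself, and (3) clears the mask bit
 * again so that later unrelated writes cannot clobber it.
 */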
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param in other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}

	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
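
/*
 * Worked example (editor's illustration with hypothetical numbers): with
 * latency_ns = 5000, a 148500 kHz pixel clock and 4 bytes per pixel, the
 * display drains roughly
 *
 *	(148500 / 1000) * 4 * 5000 / 1000 = 2970 bytes
 *
 * from the FIFO while a fetch is outstanding -- the same arithmetic that
 * intel_calculate_wm() below performs before rounding up to cachelines.
 */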
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
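
/*
 * Editor's note on the arithmetic above (inferred from the code): DSPARB
 * stores FIFO split points rather than sizes. The low 7 bits give the
 * entry at which plane B's allocation starts (and hence plane A's size),
 * while the DSPARB_CSTART field gives the next split point, so plane B's
 * share is the difference between the two.
 */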
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i845_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
  829. /**
  830. * intel_calculate_wm - calculate watermark level
  831. * @clock_in_khz: pixel clock
  832. * @wm: chip FIFO params
  833. * @pixel_size: display pixel size
  834. * @latency_ns: memory latency for the platform
  835. *
  836. * Calculate the watermark level (the level at which the display plane will
  837. * start fetching from memory again). Each chip has a different display
  838. * FIFO size and allocation, so the caller needs to figure that out and pass
  839. * in the correct intel_watermark_params structure.
  840. *
  841. * As the pixel clock runs, the FIFO will be drained at a rate that depends
  842. * on the pixel size. When it reaches the watermark level, it'll start
  843. * fetching FIFO line sized based chunks from memory until the FIFO fills
  844. * past the watermark point. If the FIFO drains completely, a FIFO underrun
  845. * will occur, and a display engine hang could result.
  846. */
  847. static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
  848. const struct intel_watermark_params *wm,
  849. int fifo_size,
  850. int pixel_size,
  851. unsigned long latency_ns)
  852. {
  853. long entries_required, wm_size;
  854. /*
  855. * Note: we need to make sure we don't overflow for various clock &
  856. * latency values.
  857. * clocks go from a few thousand to several hundred thousand.
  858. * latency is usually a few thousand
  859. */
  860. entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
  861. 1000;
  862. entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
  863. DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
  864. wm_size = fifo_size - (entries_required + wm->guard_size);
  865. DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
  866. /* Don't promote wm_size to unsigned... */
  867. if (wm_size > (long)wm->max_wm)
  868. wm_size = wm->max_wm;
  869. if (wm_size <= 0)
  870. wm_size = wm->default_wm;
  871. return wm_size;
  872. }
  873. static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
  874. {
  875. struct drm_crtc *crtc, *enabled = NULL;
  876. for_each_crtc(dev, crtc) {
  877. if (intel_crtc_active(crtc)) {
  878. if (enabled)
  879. return NULL;
  880. enabled = crtc;
  881. }
  882. }
  883. return enabled;
  884. }
  885. static void pineview_update_wm(struct drm_crtc *unused_crtc)
  886. {
  887. struct drm_device *dev = unused_crtc->dev;
  888. struct drm_i915_private *dev_priv = dev->dev_private;
  889. struct drm_crtc *crtc;
  890. const struct cxsr_latency *latency;
  891. u32 reg;
  892. unsigned long wm;
  893. latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
  894. dev_priv->fsb_freq, dev_priv->mem_freq);
  895. if (!latency) {
  896. DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
  897. pineview_disable_cxsr(dev);
  898. return;
  899. }
  900. crtc = single_enabled_crtc(dev);
  901. if (crtc) {
  902. const struct drm_display_mode *adjusted_mode;
  903. int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
  904. int clock;
  905. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  906. clock = adjusted_mode->crtc_clock;
  907. /* Display SR */
  908. wm = intel_calculate_wm(clock, &pineview_display_wm,
  909. pineview_display_wm.fifo_size,
  910. pixel_size, latency->display_sr);
  911. reg = I915_READ(DSPFW1);
  912. reg &= ~DSPFW_SR_MASK;
  913. reg |= wm << DSPFW_SR_SHIFT;
  914. I915_WRITE(DSPFW1, reg);
  915. DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
  916. /* cursor SR */
  917. wm = intel_calculate_wm(clock, &pineview_cursor_wm,
  918. pineview_display_wm.fifo_size,
  919. pixel_size, latency->cursor_sr);
  920. reg = I915_READ(DSPFW3);
  921. reg &= ~DSPFW_CURSOR_SR_MASK;
  922. reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
  923. I915_WRITE(DSPFW3, reg);
  924. /* Display HPLL off SR */
  925. wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
  926. pineview_display_hplloff_wm.fifo_size,
  927. pixel_size, latency->display_hpll_disable);
  928. reg = I915_READ(DSPFW3);
  929. reg &= ~DSPFW_HPLL_SR_MASK;
  930. reg |= wm & DSPFW_HPLL_SR_MASK;
  931. I915_WRITE(DSPFW3, reg);
  932. /* cursor HPLL off SR */
  933. wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
  934. pineview_display_hplloff_wm.fifo_size,
  935. pixel_size, latency->cursor_hpll_disable);
  936. reg = I915_READ(DSPFW3);
  937. reg &= ~DSPFW_HPLL_CURSOR_MASK;
  938. reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
  939. I915_WRITE(DSPFW3, reg);
  940. DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
  941. /* activate cxsr */
  942. I915_WRITE(DSPFW3,
  943. I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
  944. DRM_DEBUG_KMS("Self-refresh is enabled\n");
  945. } else {
  946. pineview_disable_cxsr(dev);
  947. DRM_DEBUG_KMS("Self-refresh is disabled\n");
  948. }
  949. }
  950. static bool g4x_compute_wm0(struct drm_device *dev,
  951. int plane,
  952. const struct intel_watermark_params *display,
  953. int display_latency_ns,
  954. const struct intel_watermark_params *cursor,
  955. int cursor_latency_ns,
  956. int *plane_wm,
  957. int *cursor_wm)
  958. {
  959. struct drm_crtc *crtc;
  960. const struct drm_display_mode *adjusted_mode;
  961. int htotal, hdisplay, clock, pixel_size;
  962. int line_time_us, line_count;
  963. int entries, tlb_miss;
  964. crtc = intel_get_crtc_for_plane(dev, plane);
  965. if (!intel_crtc_active(crtc)) {
  966. *cursor_wm = cursor->guard_size;
  967. *plane_wm = display->guard_size;
  968. return false;
  969. }
  970. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  971. clock = adjusted_mode->crtc_clock;
  972. htotal = adjusted_mode->crtc_htotal;
  973. hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
  974. pixel_size = crtc->primary->fb->bits_per_pixel / 8;
  975. /* Use the small buffer method to calculate plane watermark */
  976. entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
  977. tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
  978. if (tlb_miss > 0)
  979. entries += tlb_miss;
  980. entries = DIV_ROUND_UP(entries, display->cacheline_size);
  981. *plane_wm = entries + display->guard_size;
  982. if (*plane_wm > (int)display->max_wm)
  983. *plane_wm = display->max_wm;
  984. /* Use the large buffer method to calculate cursor watermark */
  985. line_time_us = max(htotal * 1000 / clock, 1);
  986. line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
  987. entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
  988. tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
  989. if (tlb_miss > 0)
  990. entries += tlb_miss;
  991. entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
  992. *cursor_wm = entries + cursor->guard_size;
  993. if (*cursor_wm > (int)cursor->max_wm)
  994. *cursor_wm = (int)cursor->max_wm;
  995. return true;
  996. }
  997. /*
  998. * Check the wm result.
  999. *
  1000. * If any calculated watermark values is larger than the maximum value that
  1001. * can be programmed into the associated watermark register, that watermark
  1002. * must be disabled.
  1003. */
  1004. static bool g4x_check_srwm(struct drm_device *dev,
  1005. int display_wm, int cursor_wm,
  1006. const struct intel_watermark_params *display,
  1007. const struct intel_watermark_params *cursor)
  1008. {
  1009. DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
  1010. display_wm, cursor_wm);
  1011. if (display_wm > display->max_wm) {
  1012. DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
  1013. display_wm, display->max_wm);
  1014. return false;
  1015. }
  1016. if (cursor_wm > cursor->max_wm) {
  1017. DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
  1018. cursor_wm, cursor->max_wm);
  1019. return false;
  1020. }
  1021. if (!(display_wm || cursor_wm)) {
  1022. DRM_DEBUG_KMS("SR latency is 0, disabling\n");
  1023. return false;
  1024. }
  1025. return true;
  1026. }
  1027. static bool g4x_compute_srwm(struct drm_device *dev,
  1028. int plane,
  1029. int latency_ns,
  1030. const struct intel_watermark_params *display,
  1031. const struct intel_watermark_params *cursor,
  1032. int *display_wm, int *cursor_wm)
  1033. {
  1034. struct drm_crtc *crtc;
  1035. const struct drm_display_mode *adjusted_mode;
  1036. int hdisplay, htotal, pixel_size, clock;
  1037. unsigned long line_time_us;
  1038. int line_count, line_size;
  1039. int small, large;
  1040. int entries;
  1041. if (!latency_ns) {
  1042. *display_wm = *cursor_wm = 0;
  1043. return false;
  1044. }
  1045. crtc = intel_get_crtc_for_plane(dev, plane);
  1046. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  1047. clock = adjusted_mode->crtc_clock;
  1048. htotal = adjusted_mode->crtc_htotal;
  1049. hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
  1050. pixel_size = crtc->primary->fb->bits_per_pixel / 8;
  1051. line_time_us = max(htotal * 1000 / clock, 1);
  1052. line_count = (latency_ns / line_time_us + 1000) / 1000;
  1053. line_size = hdisplay * pixel_size;
  1054. /* Use the minimum of the small and large buffer method for primary */
  1055. small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
  1056. large = line_count * line_size;
  1057. entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
  1058. *display_wm = entries + display->guard_size;
  1059. /* calculate the self-refresh watermark for display cursor */
  1060. entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
  1061. entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
  1062. *cursor_wm = entries + cursor->guard_size;
  1063. return g4x_check_srwm(dev,
  1064. *display_wm, *cursor_wm,
  1065. display, cursor);
  1066. }
  1067. static bool vlv_compute_drain_latency(struct drm_device *dev,
  1068. int plane,
  1069. int *plane_prec_mult,
  1070. int *plane_dl,
  1071. int *cursor_prec_mult,
  1072. int *cursor_dl)
  1073. {
  1074. struct drm_crtc *crtc;
  1075. int clock, pixel_size;
  1076. int entries;
  1077. crtc = intel_get_crtc_for_plane(dev, plane);
  1078. if (!intel_crtc_active(crtc))
  1079. return false;
  1080. clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
  1081. pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
  1082. entries = (clock / 1000) * pixel_size;
  1083. *plane_prec_mult = (entries > 256) ?
  1084. DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
  1085. *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
  1086. pixel_size);
  1087. entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
  1088. *cursor_prec_mult = (entries > 256) ?
  1089. DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
  1090. *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
  1091. return true;
  1092. }
  1093. /*
  1094. * Update drain latency registers of memory arbiter
  1095. *
  1096. * Valleyview SoC has a new memory arbiter and needs drain latency registers
  1097. * to be programmed. Each plane has a drain latency multiplier and a drain
  1098. * latency value.
  1099. */
  1100. static void vlv_update_drain_latency(struct drm_device *dev)
  1101. {
  1102. struct drm_i915_private *dev_priv = dev->dev_private;
  1103. int planea_prec, planea_dl, planeb_prec, planeb_dl;
  1104. int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
  1105. int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
  1106. either 16 or 32 */
  1107. /* For plane A, Cursor A */
  1108. if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
  1109. &cursor_prec_mult, &cursora_dl)) {
  1110. cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
  1111. DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
  1112. planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
  1113. DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
  1114. I915_WRITE(VLV_DDL1, cursora_prec |
  1115. (cursora_dl << DDL_CURSORA_SHIFT) |
  1116. planea_prec | planea_dl);
  1117. }
  1118. /* For plane B, Cursor B */
  1119. if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
  1120. &cursor_prec_mult, &cursorb_dl)) {
  1121. cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
  1122. DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
  1123. planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
  1124. DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
  1125. I915_WRITE(VLV_DDL2, cursorb_prec |
  1126. (cursorb_dl << DDL_CURSORB_SHIFT) |
  1127. planeb_prec | planeb_dl);
  1128. }
  1129. }
  1130. #define single_plane_enabled(mask) is_power_of_2(mask)
  1131. static void valleyview_update_wm(struct drm_crtc *crtc)
  1132. {
  1133. struct drm_device *dev = crtc->dev;
  1134. static const int sr_latency_ns = 12000;
  1135. struct drm_i915_private *dev_priv = dev->dev_private;
  1136. int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
  1137. int plane_sr, cursor_sr;
  1138. int ignore_plane_sr, ignore_cursor_sr;
  1139. unsigned int enabled = 0;
  1140. vlv_update_drain_latency(dev);
  1141. if (g4x_compute_wm0(dev, PIPE_A,
  1142. &valleyview_wm_info, latency_ns,
  1143. &valleyview_cursor_wm_info, latency_ns,
  1144. &planea_wm, &cursora_wm))
  1145. enabled |= 1 << PIPE_A;
  1146. if (g4x_compute_wm0(dev, PIPE_B,
  1147. &valleyview_wm_info, latency_ns,
  1148. &valleyview_cursor_wm_info, latency_ns,
  1149. &planeb_wm, &cursorb_wm))
  1150. enabled |= 1 << PIPE_B;
  1151. if (single_plane_enabled(enabled) &&
  1152. g4x_compute_srwm(dev, ffs(enabled) - 1,
  1153. sr_latency_ns,
  1154. &valleyview_wm_info,
  1155. &valleyview_cursor_wm_info,
  1156. &plane_sr, &ignore_cursor_sr) &&
  1157. g4x_compute_srwm(dev, ffs(enabled) - 1,
  1158. 2*sr_latency_ns,
  1159. &valleyview_wm_info,
  1160. &valleyview_cursor_wm_info,
  1161. &ignore_plane_sr, &cursor_sr)) {
  1162. I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
  1163. } else {
  1164. I915_WRITE(FW_BLC_SELF_VLV,
  1165. I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
  1166. plane_sr = cursor_sr = 0;
  1167. }
  1168. DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
  1169. planea_wm, cursora_wm,
  1170. planeb_wm, cursorb_wm,
  1171. plane_sr, cursor_sr);
  1172. I915_WRITE(DSPFW1,
  1173. (plane_sr << DSPFW_SR_SHIFT) |
  1174. (cursorb_wm << DSPFW_CURSORB_SHIFT) |
  1175. (planeb_wm << DSPFW_PLANEB_SHIFT) |
  1176. planea_wm);
  1177. I915_WRITE(DSPFW2,
  1178. (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
  1179. (cursora_wm << DSPFW_CURSORA_SHIFT));
  1180. I915_WRITE(DSPFW3,
  1181. (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
  1182. (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  1183. }
  1184. static void g4x_update_wm(struct drm_crtc *crtc)
  1185. {
  1186. struct drm_device *dev = crtc->dev;
  1187. static const int sr_latency_ns = 12000;
  1188. struct drm_i915_private *dev_priv = dev->dev_private;
  1189. int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
  1190. int plane_sr, cursor_sr;
  1191. unsigned int enabled = 0;
  1192. if (g4x_compute_wm0(dev, PIPE_A,
  1193. &g4x_wm_info, latency_ns,
  1194. &g4x_cursor_wm_info, latency_ns,
  1195. &planea_wm, &cursora_wm))
  1196. enabled |= 1 << PIPE_A;
  1197. if (g4x_compute_wm0(dev, PIPE_B,
  1198. &g4x_wm_info, latency_ns,
  1199. &g4x_cursor_wm_info, latency_ns,
  1200. &planeb_wm, &cursorb_wm))
  1201. enabled |= 1 << PIPE_B;
  1202. if (single_plane_enabled(enabled) &&
  1203. g4x_compute_srwm(dev, ffs(enabled) - 1,
  1204. sr_latency_ns,
  1205. &g4x_wm_info,
  1206. &g4x_cursor_wm_info,
  1207. &plane_sr, &cursor_sr)) {
  1208. I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
  1209. } else {
  1210. I915_WRITE(FW_BLC_SELF,
  1211. I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
  1212. plane_sr = cursor_sr = 0;
  1213. }
  1214. DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
  1215. planea_wm, cursora_wm,
  1216. planeb_wm, cursorb_wm,
  1217. plane_sr, cursor_sr);
  1218. I915_WRITE(DSPFW1,
  1219. (plane_sr << DSPFW_SR_SHIFT) |
  1220. (cursorb_wm << DSPFW_CURSORB_SHIFT) |
  1221. (planeb_wm << DSPFW_PLANEB_SHIFT) |
  1222. planea_wm);
  1223. I915_WRITE(DSPFW2,
  1224. (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
  1225. (cursora_wm << DSPFW_CURSORA_SHIFT));
  1226. /* HPLL off in SR has some issues on G4x... disable it */
  1227. I915_WRITE(DSPFW3,
  1228. (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
  1229. (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  1230. }
  1231. static void i965_update_wm(struct drm_crtc *unused_crtc)
  1232. {
  1233. struct drm_device *dev = unused_crtc->dev;
  1234. struct drm_i915_private *dev_priv = dev->dev_private;
  1235. struct drm_crtc *crtc;
  1236. int srwm = 1;
  1237. int cursor_sr = 16;
  1238. /* Calc sr entries for one plane configs */
  1239. crtc = single_enabled_crtc(dev);
  1240. if (crtc) {
  1241. /* self-refresh has much higher latency */
  1242. static const int sr_latency_ns = 12000;
  1243. const struct drm_display_mode *adjusted_mode =
  1244. &to_intel_crtc(crtc)->config.adjusted_mode;
  1245. int clock = adjusted_mode->crtc_clock;
  1246. int htotal = adjusted_mode->crtc_htotal;
  1247. int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
  1248. int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
  1249. unsigned long line_time_us;
  1250. int entries;
  1251. line_time_us = max(htotal * 1000 / clock, 1);
  1252. /* Use ns/us then divide to preserve precision */
  1253. entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
  1254. pixel_size * hdisplay;
  1255. entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
  1256. srwm = I965_FIFO_SIZE - entries;
  1257. if (srwm < 0)
  1258. srwm = 1;
  1259. srwm &= 0x1ff;
  1260. DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
  1261. entries, srwm);
  1262. entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
  1263. pixel_size * to_intel_crtc(crtc)->cursor_width;
  1264. entries = DIV_ROUND_UP(entries,
  1265. i965_cursor_wm_info.cacheline_size);
  1266. cursor_sr = i965_cursor_wm_info.fifo_size -
  1267. (entries + i965_cursor_wm_info.guard_size);
  1268. if (cursor_sr > i965_cursor_wm_info.max_wm)
  1269. cursor_sr = i965_cursor_wm_info.max_wm;
  1270. DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
  1271. "cursor %d\n", srwm, cursor_sr);
  1272. if (IS_CRESTLINE(dev))
  1273. I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
  1274. } else {
  1275. /* Turn off self refresh if both pipes are enabled */
  1276. if (IS_CRESTLINE(dev))
  1277. I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
  1278. & ~FW_BLC_SELF_EN);
  1279. }
  1280. DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
  1281. srwm);
  1282. /* 965 has limitations... */
  1283. I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
  1284. (8 << 16) | (8 << 8) | (8 << 0));
  1285. I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
  1286. /* update cursor SR watermark */
  1287. I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
  1288. }
  1289. static void i9xx_update_wm(struct drm_crtc *unused_crtc)
  1290. {
  1291. struct drm_device *dev = unused_crtc->dev;
  1292. struct drm_i915_private *dev_priv = dev->dev_private;
  1293. const struct intel_watermark_params *wm_info;
  1294. uint32_t fwater_lo;
  1295. uint32_t fwater_hi;
  1296. int cwm, srwm = 1;
  1297. int fifo_size;
  1298. int planea_wm, planeb_wm;
  1299. struct drm_crtc *crtc, *enabled = NULL;
  1300. if (IS_I945GM(dev))
  1301. wm_info = &i945_wm_info;
  1302. else if (!IS_GEN2(dev))
  1303. wm_info = &i915_wm_info;
  1304. else
  1305. wm_info = &i830_wm_info;
  1306. fifo_size = dev_priv->display.get_fifo_size(dev, 0);
  1307. crtc = intel_get_crtc_for_plane(dev, 0);
  1308. if (intel_crtc_active(crtc)) {
  1309. const struct drm_display_mode *adjusted_mode;
  1310. int cpp = crtc->primary->fb->bits_per_pixel / 8;
  1311. if (IS_GEN2(dev))
  1312. cpp = 4;
  1313. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  1314. planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
  1315. wm_info, fifo_size, cpp,
  1316. latency_ns);
  1317. enabled = crtc;
  1318. } else
  1319. planea_wm = fifo_size - wm_info->guard_size;
  1320. fifo_size = dev_priv->display.get_fifo_size(dev, 1);
  1321. crtc = intel_get_crtc_for_plane(dev, 1);
  1322. if (intel_crtc_active(crtc)) {
  1323. const struct drm_display_mode *adjusted_mode;
  1324. int cpp = crtc->primary->fb->bits_per_pixel / 8;
  1325. if (IS_GEN2(dev))
  1326. cpp = 4;
  1327. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  1328. planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
  1329. wm_info, fifo_size, cpp,
  1330. latency_ns);
  1331. if (enabled == NULL)
  1332. enabled = crtc;
  1333. else
  1334. enabled = NULL;
  1335. } else
  1336. planeb_wm = fifo_size - wm_info->guard_size;
  1337. DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
  1338. if (IS_I915GM(dev) && enabled) {
  1339. struct intel_framebuffer *fb;
  1340. fb = to_intel_framebuffer(enabled->primary->fb);
  1341. /* self-refresh seems busted with untiled */
  1342. if (fb->obj->tiling_mode == I915_TILING_NONE)
  1343. enabled = NULL;
  1344. }
  1345. /*
  1346. * Overlay gets an aggressive default since video jitter is bad.
  1347. */
  1348. cwm = 2;
  1349. /* Play safe and disable self-refresh before adjusting watermarks. */
  1350. if (IS_I945G(dev) || IS_I945GM(dev))
  1351. I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
  1352. else if (IS_I915GM(dev))
  1353. I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
  1354. /* Calc sr entries for one plane configs */
  1355. if (HAS_FW_BLC(dev) && enabled) {
  1356. /* self-refresh has much higher latency */
  1357. static const int sr_latency_ns = 6000;
  1358. const struct drm_display_mode *adjusted_mode =
  1359. &to_intel_crtc(enabled)->config.adjusted_mode;
  1360. int clock = adjusted_mode->crtc_clock;
  1361. int htotal = adjusted_mode->crtc_htotal;
  1362. int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
  1363. int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
  1364. unsigned long line_time_us;
  1365. int entries;
  1366. line_time_us = max(htotal * 1000 / clock, 1);
  1367. /* Use ns/us then divide to preserve precision */
  1368. entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
  1369. pixel_size * hdisplay;
  1370. entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
  1371. DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
  1372. srwm = wm_info->fifo_size - entries;
  1373. if (srwm < 0)
  1374. srwm = 1;
  1375. if (IS_I945G(dev) || IS_I945GM(dev))
  1376. I915_WRITE(FW_BLC_SELF,
  1377. FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
  1378. else if (IS_I915GM(dev))
  1379. I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
  1380. }
  1381. DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
  1382. planea_wm, planeb_wm, cwm, srwm);
  1383. fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
  1384. fwater_hi = (cwm & 0x1f);
  1385. /* Set request length to 8 cachelines per fetch */
  1386. fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
  1387. fwater_hi = fwater_hi | (1 << 8);
  1388. I915_WRITE(FW_BLC, fwater_lo);
  1389. I915_WRITE(FW_BLC2, fwater_hi);
  1390. if (HAS_FW_BLC(dev)) {
  1391. if (enabled) {
  1392. if (IS_I945G(dev) || IS_I945GM(dev))
  1393. I915_WRITE(FW_BLC_SELF,
  1394. FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
  1395. else if (IS_I915GM(dev))
  1396. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
  1397. DRM_DEBUG_KMS("memory self refresh enabled\n");
  1398. } else
  1399. DRM_DEBUG_KMS("memory self refresh disabled\n");
  1400. }
  1401. }
  1402. static void i845_update_wm(struct drm_crtc *unused_crtc)
  1403. {
  1404. struct drm_device *dev = unused_crtc->dev;
  1405. struct drm_i915_private *dev_priv = dev->dev_private;
  1406. struct drm_crtc *crtc;
  1407. const struct drm_display_mode *adjusted_mode;
  1408. uint32_t fwater_lo;
  1409. int planea_wm;
  1410. crtc = single_enabled_crtc(dev);
  1411. if (crtc == NULL)
  1412. return;
  1413. adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
  1414. planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
  1415. &i845_wm_info,
  1416. dev_priv->display.get_fifo_size(dev, 0),
  1417. 4, latency_ns);
  1418. fwater_lo = I915_READ(FW_BLC) & ~0xfff;
  1419. fwater_lo |= (3<<8) | planea_wm;
  1420. DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
  1421. I915_WRITE(FW_BLC, fwater_lo);
  1422. }
  1423. static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
  1424. struct drm_crtc *crtc)
  1425. {
  1426. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1427. uint32_t pixel_rate;
  1428. pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
  1429. /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
  1430. * adjust the pixel_rate here. */
  1431. if (intel_crtc->config.pch_pfit.enabled) {
  1432. uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
  1433. uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
  1434. pipe_w = intel_crtc->config.pipe_src_w;
  1435. pipe_h = intel_crtc->config.pipe_src_h;
  1436. pfit_w = (pfit_size >> 16) & 0xFFFF;
  1437. pfit_h = pfit_size & 0xFFFF;
  1438. if (pipe_w < pfit_w)
  1439. pipe_w = pfit_w;
  1440. if (pipe_h < pfit_h)
  1441. pipe_h = pfit_h;
  1442. pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
  1443. pfit_w * pfit_h);
  1444. }
  1445. return pixel_rate;
  1446. }
  1447. /* latency must be in 0.1us units. */
  1448. static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
  1449. uint32_t latency)
  1450. {
  1451. uint64_t ret;
  1452. if (WARN(latency == 0, "Latency value missing\n"))
  1453. return UINT_MAX;
  1454. ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
  1455. ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
  1456. return ret;
  1457. }
  1458. /* latency must be in 0.1us units. */
  1459. static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
  1460. uint32_t horiz_pixels, uint8_t bytes_per_pixel,
  1461. uint32_t latency)
  1462. {
  1463. uint32_t ret;
  1464. if (WARN(latency == 0, "Latency value missing\n"))
  1465. return UINT_MAX;
  1466. ret = (latency * pixel_rate) / (pipe_htotal * 10000);
  1467. ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
  1468. ret = DIV_ROUND_UP(ret, 64) + 2;
  1469. return ret;
  1470. }
  1471. static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
  1472. uint8_t bytes_per_pixel)
  1473. {
  1474. return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
  1475. }
  1476. struct ilk_pipe_wm_parameters {
  1477. bool active;
  1478. uint32_t pipe_htotal;
  1479. uint32_t pixel_rate;
  1480. struct intel_plane_wm_parameters pri;
  1481. struct intel_plane_wm_parameters spr;
  1482. struct intel_plane_wm_parameters cur;
  1483. };
  1484. struct ilk_wm_maximums {
  1485. uint16_t pri;
  1486. uint16_t spr;
  1487. uint16_t cur;
  1488. uint16_t fbc;
  1489. };
  1490. /* used in computing the new watermarks state */
  1491. struct intel_wm_config {
  1492. unsigned int num_pipes_active;
  1493. bool sprites_enabled;
  1494. bool sprites_scaled;
  1495. };
  1496. /*
  1497. * For both WM_PIPE and WM_LP.
  1498. * mem_value must be in 0.1us units.
  1499. */
  1500. static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
  1501. uint32_t mem_value,
  1502. bool is_lp)
  1503. {
  1504. uint32_t method1, method2;
  1505. if (!params->active || !params->pri.enabled)
  1506. return 0;
  1507. method1 = ilk_wm_method1(params->pixel_rate,
  1508. params->pri.bytes_per_pixel,
  1509. mem_value);
  1510. if (!is_lp)
  1511. return method1;
  1512. method2 = ilk_wm_method2(params->pixel_rate,
  1513. params->pipe_htotal,
  1514. params->pri.horiz_pixels,
  1515. params->pri.bytes_per_pixel,
  1516. mem_value);
  1517. return min(method1, method2);
  1518. }
  1519. /*
  1520. * For both WM_PIPE and WM_LP.
  1521. * mem_value must be in 0.1us units.
  1522. */
  1523. static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
  1524. uint32_t mem_value)
  1525. {
  1526. uint32_t method1, method2;
  1527. if (!params->active || !params->spr.enabled)
  1528. return 0;
  1529. method1 = ilk_wm_method1(params->pixel_rate,
  1530. params->spr.bytes_per_pixel,
  1531. mem_value);
  1532. method2 = ilk_wm_method2(params->pixel_rate,
  1533. params->pipe_htotal,
  1534. params->spr.horiz_pixels,
  1535. params->spr.bytes_per_pixel,
  1536. mem_value);
  1537. return min(method1, method2);
  1538. }
  1539. /*
  1540. * For both WM_PIPE and WM_LP.
  1541. * mem_value must be in 0.1us units.
  1542. */
  1543. static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
  1544. uint32_t mem_value)
  1545. {
  1546. if (!params->active || !params->cur.enabled)
  1547. return 0;
  1548. return ilk_wm_method2(params->pixel_rate,
  1549. params->pipe_htotal,
  1550. params->cur.horiz_pixels,
  1551. params->cur.bytes_per_pixel,
  1552. mem_value);
  1553. }
  1554. /* Only for WM_LP. */
  1555. static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
  1556. uint32_t pri_val)
  1557. {
  1558. if (!params->active || !params->pri.enabled)
  1559. return 0;
  1560. return ilk_wm_fbc(pri_val,
  1561. params->pri.horiz_pixels,
  1562. params->pri.bytes_per_pixel);
  1563. }
  1564. static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
  1565. {
  1566. if (INTEL_INFO(dev)->gen >= 8)
  1567. return 3072;
  1568. else if (INTEL_INFO(dev)->gen >= 7)
  1569. return 768;
  1570. else
  1571. return 512;
  1572. }
  1573. static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
  1574. int level, bool is_sprite)
  1575. {
  1576. if (INTEL_INFO(dev)->gen >= 8)
  1577. /* BDW primary/sprite plane watermarks */
  1578. return level == 0 ? 255 : 2047;
  1579. else if (INTEL_INFO(dev)->gen >= 7)
  1580. /* IVB/HSW primary/sprite plane watermarks */
  1581. return level == 0 ? 127 : 1023;
  1582. else if (!is_sprite)
  1583. /* ILK/SNB primary plane watermarks */
  1584. return level == 0 ? 127 : 511;
  1585. else
  1586. /* ILK/SNB sprite plane watermarks */
  1587. return level == 0 ? 63 : 255;
  1588. }
  1589. static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
  1590. int level)
  1591. {
  1592. if (INTEL_INFO(dev)->gen >= 7)
  1593. return level == 0 ? 63 : 255;
  1594. else
  1595. return level == 0 ? 31 : 63;
  1596. }
  1597. static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
  1598. {
  1599. if (INTEL_INFO(dev)->gen >= 8)
  1600. return 31;
  1601. else
  1602. return 15;
  1603. }
  1604. /* Calculate the maximum primary/sprite plane watermark */
  1605. static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
  1606. int level,
  1607. const struct intel_wm_config *config,
  1608. enum intel_ddb_partitioning ddb_partitioning,
  1609. bool is_sprite)
  1610. {
  1611. unsigned int fifo_size = ilk_display_fifo_size(dev);
  1612. /* if sprites aren't enabled, sprites get nothing */
  1613. if (is_sprite && !config->sprites_enabled)
  1614. return 0;
  1615. /* HSW allows LP1+ watermarks even with multiple pipes */
  1616. if (level == 0 || config->num_pipes_active > 1) {
  1617. fifo_size /= INTEL_INFO(dev)->num_pipes;
  1618. /*
  1619. * For some reason the non self refresh
  1620. * FIFO size is only half of the self
  1621. * refresh FIFO size on ILK/SNB.
  1622. */
  1623. if (INTEL_INFO(dev)->gen <= 6)
  1624. fifo_size /= 2;
  1625. }
  1626. if (config->sprites_enabled) {
  1627. /* level 0 is always calculated with 1:1 split */
  1628. if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
  1629. if (is_sprite)
  1630. fifo_size *= 5;
  1631. fifo_size /= 6;
  1632. } else {
  1633. fifo_size /= 2;
  1634. }
  1635. }
  1636. /* clamp to max that the registers can hold */
  1637. return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
  1638. }
  1639. /* Calculate the maximum cursor plane watermark */
  1640. static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
  1641. int level,
  1642. const struct intel_wm_config *config)
  1643. {
  1644. /* HSW LP1+ watermarks w/ multiple pipes */
  1645. if (level > 0 && config->num_pipes_active > 1)
  1646. return 64;
  1647. /* otherwise just report max that registers can hold */
  1648. return ilk_cursor_wm_reg_max(dev, level);
  1649. }
  1650. static void ilk_compute_wm_maximums(const struct drm_device *dev,
  1651. int level,
  1652. const struct intel_wm_config *config,
  1653. enum intel_ddb_partitioning ddb_partitioning,
  1654. struct ilk_wm_maximums *max)
  1655. {
  1656. max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
  1657. max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
  1658. max->cur = ilk_cursor_wm_max(dev, level, config);
  1659. max->fbc = ilk_fbc_wm_reg_max(dev);
  1660. }
  1661. static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
  1662. int level,
  1663. struct ilk_wm_maximums *max)
  1664. {
  1665. max->pri = ilk_plane_wm_reg_max(dev, level, false);
  1666. max->spr = ilk_plane_wm_reg_max(dev, level, true);
  1667. max->cur = ilk_cursor_wm_reg_max(dev, level);
  1668. max->fbc = ilk_fbc_wm_reg_max(dev);
  1669. }
  1670. static bool ilk_validate_wm_level(int level,
  1671. const struct ilk_wm_maximums *max,
  1672. struct intel_wm_level *result)
  1673. {
  1674. bool ret;
  1675. /* already determined to be invalid? */
  1676. if (!result->enable)
  1677. return false;
  1678. result->enable = result->pri_val <= max->pri &&
  1679. result->spr_val <= max->spr &&
  1680. result->cur_val <= max->cur;
  1681. ret = result->enable;
  1682. /*
  1683. * HACK until we can pre-compute everything,
  1684. * and thus fail gracefully if LP0 watermarks
  1685. * are exceeded...
  1686. */
  1687. if (level == 0 && !result->enable) {
  1688. if (result->pri_val > max->pri)
  1689. DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
  1690. level, result->pri_val, max->pri);
  1691. if (result->spr_val > max->spr)
  1692. DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
  1693. level, result->spr_val, max->spr);
  1694. if (result->cur_val > max->cur)
  1695. DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
  1696. level, result->cur_val, max->cur);
  1697. result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
  1698. result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
  1699. result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
  1700. result->enable = true;
  1701. }
  1702. return ret;
  1703. }
  1704. static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
  1705. int level,
  1706. const struct ilk_pipe_wm_parameters *p,
  1707. struct intel_wm_level *result)
  1708. {
  1709. uint16_t pri_latency = dev_priv->wm.pri_latency[level];
  1710. uint16_t spr_latency = dev_priv->wm.spr_latency[level];
  1711. uint16_t cur_latency = dev_priv->wm.cur_latency[level];
  1712. /* WM1+ latency values stored in 0.5us units */
  1713. if (level > 0) {
  1714. pri_latency *= 5;
  1715. spr_latency *= 5;
  1716. cur_latency *= 5;
  1717. }
  1718. result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
  1719. result->spr_val = ilk_compute_spr_wm(p, spr_latency);
  1720. result->cur_val = ilk_compute_cur_wm(p, cur_latency);
  1721. result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
  1722. result->enable = true;
  1723. }
  1724. static uint32_t
  1725. hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
  1726. {
  1727. struct drm_i915_private *dev_priv = dev->dev_private;
  1728. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1729. struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
  1730. u32 linetime, ips_linetime;
  1731. if (!intel_crtc_active(crtc))
  1732. return 0;
  1733. /* The WM are computed with base on how long it takes to fill a single
  1734. * row at the given clock rate, multiplied by 8.
  1735. * */
  1736. linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
  1737. mode->crtc_clock);
  1738. ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
  1739. intel_ddi_get_cdclk_freq(dev_priv));
  1740. return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
  1741. PIPE_WM_LINETIME_TIME(linetime);
  1742. }
  1743. static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
  1744. {
  1745. struct drm_i915_private *dev_priv = dev->dev_private;
  1746. if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  1747. uint64_t sskpd = I915_READ64(MCH_SSKPD);
  1748. wm[0] = (sskpd >> 56) & 0xFF;
  1749. if (wm[0] == 0)
  1750. wm[0] = sskpd & 0xF;
  1751. wm[1] = (sskpd >> 4) & 0xFF;
  1752. wm[2] = (sskpd >> 12) & 0xFF;
  1753. wm[3] = (sskpd >> 20) & 0x1FF;
  1754. wm[4] = (sskpd >> 32) & 0x1FF;
  1755. } else if (INTEL_INFO(dev)->gen >= 6) {
  1756. uint32_t sskpd = I915_READ(MCH_SSKPD);
  1757. wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
  1758. wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
  1759. wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
  1760. wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
  1761. } else if (INTEL_INFO(dev)->gen >= 5) {
  1762. uint32_t mltr = I915_READ(MLTR_ILK);
  1763. /* ILK primary LP0 latency is 700 ns */
  1764. wm[0] = 7;
  1765. wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
  1766. wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
  1767. }
  1768. }
  1769. static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
  1770. {
  1771. /* ILK sprite LP0 latency is 1300 ns */
  1772. if (INTEL_INFO(dev)->gen == 5)
  1773. wm[0] = 13;
  1774. }
  1775. static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
  1776. {
  1777. /* ILK cursor LP0 latency is 1300 ns */
  1778. if (INTEL_INFO(dev)->gen == 5)
  1779. wm[0] = 13;
  1780. /* WaDoubleCursorLP3Latency:ivb */
  1781. if (IS_IVYBRIDGE(dev))
  1782. wm[3] *= 2;
  1783. }
  1784. int ilk_wm_max_level(const struct drm_device *dev)
  1785. {
  1786. /* how many WM levels are we expecting */
  1787. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  1788. return 4;
  1789. else if (INTEL_INFO(dev)->gen >= 6)
  1790. return 3;
  1791. else
  1792. return 2;
  1793. }
  1794. static void intel_print_wm_latency(struct drm_device *dev,
  1795. const char *name,
  1796. const uint16_t wm[5])
  1797. {
  1798. int level, max_level = ilk_wm_max_level(dev);
  1799. for (level = 0; level <= max_level; level++) {
  1800. unsigned int latency = wm[level];
  1801. if (latency == 0) {
  1802. DRM_ERROR("%s WM%d latency not provided\n",
  1803. name, level);
  1804. continue;
  1805. }
  1806. /* WM1+ latency values in 0.5us units */
  1807. if (level > 0)
  1808. latency *= 5;
  1809. DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
  1810. name, level, wm[level],
  1811. latency / 10, latency % 10);
  1812. }
  1813. }
  1814. static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
  1815. uint16_t wm[5], uint16_t min)
  1816. {
  1817. int level, max_level = ilk_wm_max_level(dev_priv->dev);
  1818. if (wm[0] >= min)
  1819. return false;
  1820. wm[0] = max(wm[0], min);
  1821. for (level = 1; level <= max_level; level++)
  1822. wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
  1823. return true;
  1824. }
  1825. static void snb_wm_latency_quirk(struct drm_device *dev)
  1826. {
  1827. struct drm_i915_private *dev_priv = dev->dev_private;
  1828. bool changed;
  1829. /*
  1830. * The BIOS provided WM memory latency values are often
  1831. * inadequate for high resolution displays. Adjust them.
  1832. */
  1833. changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
  1834. ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
  1835. ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
  1836. if (!changed)
  1837. return;
  1838. DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
  1839. intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
  1840. intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
  1841. intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
  1842. }
  1843. static void ilk_setup_wm_latency(struct drm_device *dev)
  1844. {
  1845. struct drm_i915_private *dev_priv = dev->dev_private;
  1846. intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
  1847. memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
  1848. sizeof(dev_priv->wm.pri_latency));
  1849. memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
  1850. sizeof(dev_priv->wm.pri_latency));
  1851. intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
  1852. intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
  1853. intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
  1854. intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
  1855. intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
  1856. if (IS_GEN6(dev))
  1857. snb_wm_latency_quirk(dev);
  1858. }
  1859. static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
  1860. struct ilk_pipe_wm_parameters *p)
  1861. {
  1862. struct drm_device *dev = crtc->dev;
  1863. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1864. enum pipe pipe = intel_crtc->pipe;
  1865. struct drm_plane *plane;
  1866. if (!intel_crtc_active(crtc))
  1867. return;
  1868. p->active = true;
  1869. p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
  1870. p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
  1871. p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
  1872. p->cur.bytes_per_pixel = 4;
  1873. p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
  1874. p->cur.horiz_pixels = intel_crtc->cursor_width;
  1875. /* TODO: for now, assume primary and cursor planes are always enabled. */
  1876. p->pri.enabled = true;
  1877. p->cur.enabled = true;
  1878. drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
  1879. struct intel_plane *intel_plane = to_intel_plane(plane);
  1880. if (intel_plane->pipe == pipe) {
  1881. p->spr = intel_plane->wm;
  1882. break;
  1883. }
  1884. }
  1885. }
  1886. static void ilk_compute_wm_config(struct drm_device *dev,
  1887. struct intel_wm_config *config)
  1888. {
  1889. struct intel_crtc *intel_crtc;
  1890. /* Compute the currently _active_ config */
  1891. for_each_intel_crtc(dev, intel_crtc) {
  1892. const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
  1893. if (!wm->pipe_enabled)
  1894. continue;
  1895. config->sprites_enabled |= wm->sprites_enabled;
  1896. config->sprites_scaled |= wm->sprites_scaled;
  1897. config->num_pipes_active++;
  1898. }
  1899. }
  1900. /* Compute new watermarks for the pipe */
  1901. static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
  1902. const struct ilk_pipe_wm_parameters *params,
  1903. struct intel_pipe_wm *pipe_wm)
  1904. {
  1905. struct drm_device *dev = crtc->dev;
  1906. const struct drm_i915_private *dev_priv = dev->dev_private;
  1907. int level, max_level = ilk_wm_max_level(dev);
  1908. /* LP0 watermark maximums depend on this pipe alone */
  1909. struct intel_wm_config config = {
  1910. .num_pipes_active = 1,
  1911. .sprites_enabled = params->spr.enabled,
  1912. .sprites_scaled = params->spr.scaled,
  1913. };
  1914. struct ilk_wm_maximums max;
  1915. pipe_wm->pipe_enabled = params->active;
  1916. pipe_wm->sprites_enabled = params->spr.enabled;
  1917. pipe_wm->sprites_scaled = params->spr.scaled;
  1918. /* ILK/SNB: LP2+ watermarks only w/o sprites */
  1919. if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
  1920. max_level = 1;
  1921. /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
  1922. if (params->spr.scaled)
  1923. max_level = 0;
  1924. ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
  1925. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  1926. pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
  1927. /* LP0 watermarks always use 1/2 DDB partitioning */
  1928. ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
  1929. /* At least LP0 must be valid */
  1930. if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
  1931. return false;
  1932. ilk_compute_wm_reg_maximums(dev, 1, &max);
  1933. for (level = 1; level <= max_level; level++) {
  1934. struct intel_wm_level wm = {};
  1935. ilk_compute_wm_level(dev_priv, level, params, &wm);
  1936. /*
  1937. * Disable any watermark level that exceeds the
  1938. * register maximums since such watermarks are
  1939. * always invalid.
  1940. */
  1941. if (!ilk_validate_wm_level(level, &max, &wm))
  1942. break;
  1943. pipe_wm->wm[level] = wm;
  1944. }
  1945. return true;
  1946. }
  1947. /*
  1948. * Merge the watermarks from all active pipes for a specific level.
  1949. */
  1950. static void ilk_merge_wm_level(struct drm_device *dev,
  1951. int level,
  1952. struct intel_wm_level *ret_wm)
  1953. {
  1954. const struct intel_crtc *intel_crtc;
  1955. ret_wm->enable = true;
  1956. for_each_intel_crtc(dev, intel_crtc) {
  1957. const struct intel_pipe_wm *active = &intel_crtc->wm.active;
  1958. const struct intel_wm_level *wm = &active->wm[level];
  1959. if (!active->pipe_enabled)
  1960. continue;
  1961. /*
  1962. * The watermark values may have been used in the past,
  1963. * so we must maintain them in the registers for some
  1964. * time even if the level is now disabled.
  1965. */
  1966. if (!wm->enable)
  1967. ret_wm->enable = false;
  1968. ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
  1969. ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
  1970. ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
  1971. ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
  1972. }
  1973. }
  1974. /*
  1975. * Merge all low power watermarks for all active pipes.
  1976. */
  1977. static void ilk_wm_merge(struct drm_device *dev,
  1978. const struct intel_wm_config *config,
  1979. const struct ilk_wm_maximums *max,
  1980. struct intel_pipe_wm *merged)
  1981. {
  1982. int level, max_level = ilk_wm_max_level(dev);
  1983. int last_enabled_level = max_level;
  1984. /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
  1985. if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
  1986. config->num_pipes_active > 1)
  1987. return;
  1988. /* ILK: FBC WM must be disabled always */
  1989. merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
  1990. /* merge each WM1+ level */
  1991. for (level = 1; level <= max_level; level++) {
  1992. struct intel_wm_level *wm = &merged->wm[level];
  1993. ilk_merge_wm_level(dev, level, wm);
  1994. if (level > last_enabled_level)
  1995. wm->enable = false;
  1996. else if (!ilk_validate_wm_level(level, max, wm))
  1997. /* make sure all following levels get disabled */
  1998. last_enabled_level = level - 1;
  1999. /*
  2000. * The spec says it is preferred to disable
  2001. * FBC WMs instead of disabling a WM level.
  2002. */
  2003. if (wm->fbc_val > max->fbc) {
  2004. if (wm->enable)
  2005. merged->fbc_wm_enabled = false;
  2006. wm->fbc_val = 0;
  2007. }
  2008. }
  2009. /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
  2010. /*
  2011. * FIXME this is racy. FBC might get enabled later.
  2012. * What we should check here is whether FBC can be
  2013. * enabled sometime later.
  2014. */
  2015. if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
  2016. for (level = 2; level <= max_level; level++) {
  2017. struct intel_wm_level *wm = &merged->wm[level];
  2018. wm->enable = false;
  2019. }
  2020. }
  2021. }
  2022. static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
  2023. {
  2024. /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
  2025. return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
  2026. }
  2027. /* The value we need to program into the WM_LPx latency field */
  2028. static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
  2029. {
  2030. struct drm_i915_private *dev_priv = dev->dev_private;
  2031. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2032. return 2 * level;
  2033. else
  2034. return dev_priv->wm.pri_latency[level];
  2035. }
  2036. static void ilk_compute_wm_results(struct drm_device *dev,
  2037. const struct intel_pipe_wm *merged,
  2038. enum intel_ddb_partitioning partitioning,
  2039. struct ilk_wm_values *results)
  2040. {
  2041. struct intel_crtc *intel_crtc;
  2042. int level, wm_lp;
  2043. results->enable_fbc_wm = merged->fbc_wm_enabled;
  2044. results->partitioning = partitioning;
  2045. /* LP1+ register values */
  2046. for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
  2047. const struct intel_wm_level *r;
  2048. level = ilk_wm_lp_to_level(wm_lp, merged);
  2049. r = &merged->wm[level];
  2050. /*
  2051. * Maintain the watermark values even if the level is
  2052. * disabled. Doing otherwise could cause underruns.
  2053. */
  2054. results->wm_lp[wm_lp - 1] =
  2055. (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
  2056. (r->pri_val << WM1_LP_SR_SHIFT) |
  2057. r->cur_val;
  2058. if (r->enable)
  2059. results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
  2060. if (INTEL_INFO(dev)->gen >= 8)
  2061. results->wm_lp[wm_lp - 1] |=
  2062. r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
  2063. else
  2064. results->wm_lp[wm_lp - 1] |=
  2065. r->fbc_val << WM1_LP_FBC_SHIFT;
  2066. /*
  2067. * Always set WM1S_LP_EN when spr_val != 0, even if the
  2068. * level is disabled. Doing otherwise could cause underruns.
  2069. */
  2070. if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
  2071. WARN_ON(wm_lp != 1);
  2072. results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
  2073. } else
  2074. results->wm_lp_spr[wm_lp - 1] = r->spr_val;
  2075. }
  2076. /* LP0 register values */
  2077. for_each_intel_crtc(dev, intel_crtc) {
  2078. enum pipe pipe = intel_crtc->pipe;
  2079. const struct intel_wm_level *r =
  2080. &intel_crtc->wm.active.wm[0];
  2081. if (WARN_ON(!r->enable))
  2082. continue;
  2083. results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
  2084. results->wm_pipe[pipe] =
  2085. (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
  2086. (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
  2087. r->cur_val;
  2088. }
  2089. }
  2090. /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  2091. * case both are at the same level. Prefer r1 in case they're the same. */
  2092. static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
  2093. struct intel_pipe_wm *r1,
  2094. struct intel_pipe_wm *r2)
  2095. {
  2096. int level, max_level = ilk_wm_max_level(dev);
  2097. int level1 = 0, level2 = 0;
  2098. for (level = 1; level <= max_level; level++) {
  2099. if (r1->wm[level].enable)
  2100. level1 = level;
  2101. if (r2->wm[level].enable)
  2102. level2 = level;
  2103. }
  2104. if (level1 == level2) {
  2105. if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
  2106. return r2;
  2107. else
  2108. return r1;
  2109. } else if (level1 > level2) {
  2110. return r1;
  2111. } else {
  2112. return r2;
  2113. }
  2114. }
  2115. /* dirty bits used to track which watermarks need changes */
  2116. #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
  2117. #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
  2118. #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
  2119. #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
  2120. #define WM_DIRTY_FBC (1 << 24)
  2121. #define WM_DIRTY_DDB (1 << 25)
  2122. static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
  2123. const struct ilk_wm_values *old,
  2124. const struct ilk_wm_values *new)
  2125. {
  2126. unsigned int dirty = 0;
  2127. enum pipe pipe;
  2128. int wm_lp;
  2129. for_each_pipe(pipe) {
  2130. if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
  2131. dirty |= WM_DIRTY_LINETIME(pipe);
  2132. /* Must disable LP1+ watermarks too */
  2133. dirty |= WM_DIRTY_LP_ALL;
  2134. }
  2135. if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
  2136. dirty |= WM_DIRTY_PIPE(pipe);
  2137. /* Must disable LP1+ watermarks too */
  2138. dirty |= WM_DIRTY_LP_ALL;
  2139. }
  2140. }
  2141. if (old->enable_fbc_wm != new->enable_fbc_wm) {
  2142. dirty |= WM_DIRTY_FBC;
  2143. /* Must disable LP1+ watermarks too */
  2144. dirty |= WM_DIRTY_LP_ALL;
  2145. }
  2146. if (old->partitioning != new->partitioning) {
  2147. dirty |= WM_DIRTY_DDB;
  2148. /* Must disable LP1+ watermarks too */
  2149. dirty |= WM_DIRTY_LP_ALL;
  2150. }
  2151. /* LP1+ watermarks already deemed dirty, no need to continue */
  2152. if (dirty & WM_DIRTY_LP_ALL)
  2153. return dirty;
  2154. /* Find the lowest numbered LP1+ watermark in need of an update... */
  2155. for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
  2156. if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
  2157. old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
  2158. break;
  2159. }
  2160. /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
  2161. for (; wm_lp <= 3; wm_lp++)
  2162. dirty |= WM_DIRTY_LP(wm_lp);
  2163. return dirty;
  2164. }
  2165. static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
  2166. unsigned int dirty)
  2167. {
  2168. struct ilk_wm_values *previous = &dev_priv->wm.hw;
  2169. bool changed = false;
  2170. if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
  2171. previous->wm_lp[2] &= ~WM1_LP_SR_EN;
  2172. I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
  2173. changed = true;
  2174. }
  2175. if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
  2176. previous->wm_lp[1] &= ~WM1_LP_SR_EN;
  2177. I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
  2178. changed = true;
  2179. }
  2180. if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
  2181. previous->wm_lp[0] &= ~WM1_LP_SR_EN;
  2182. I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
  2183. changed = true;
  2184. }
  2185. /*
  2186. * Don't touch WM1S_LP_EN here.
  2187. * Doing so could cause underruns.
  2188. */
  2189. return changed;
  2190. }
  2191. /*
  2192. * The spec says we shouldn't write when we don't need, because every write
  2193. * causes WMs to be re-evaluated, expending some power.
  2194. */
  2195. static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
  2196. struct ilk_wm_values *results)
  2197. {
  2198. struct drm_device *dev = dev_priv->dev;
  2199. struct ilk_wm_values *previous = &dev_priv->wm.hw;
  2200. unsigned int dirty;
  2201. uint32_t val;
  2202. dirty = ilk_compute_wm_dirty(dev, previous, results);
  2203. if (!dirty)
  2204. return;
  2205. _ilk_disable_lp_wm(dev_priv, dirty);
  2206. if (dirty & WM_DIRTY_PIPE(PIPE_A))
  2207. I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
  2208. if (dirty & WM_DIRTY_PIPE(PIPE_B))
  2209. I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
  2210. if (dirty & WM_DIRTY_PIPE(PIPE_C))
  2211. I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
  2212. if (dirty & WM_DIRTY_LINETIME(PIPE_A))
  2213. I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
  2214. if (dirty & WM_DIRTY_LINETIME(PIPE_B))
  2215. I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
  2216. if (dirty & WM_DIRTY_LINETIME(PIPE_C))
  2217. I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
  2218. if (dirty & WM_DIRTY_DDB) {
  2219. if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  2220. val = I915_READ(WM_MISC);
  2221. if (results->partitioning == INTEL_DDB_PART_1_2)
  2222. val &= ~WM_MISC_DATA_PARTITION_5_6;
  2223. else
  2224. val |= WM_MISC_DATA_PARTITION_5_6;
  2225. I915_WRITE(WM_MISC, val);
  2226. } else {
  2227. val = I915_READ(DISP_ARB_CTL2);
  2228. if (results->partitioning == INTEL_DDB_PART_1_2)
  2229. val &= ~DISP_DATA_PARTITION_5_6;
  2230. else
  2231. val |= DISP_DATA_PARTITION_5_6;
  2232. I915_WRITE(DISP_ARB_CTL2, val);
  2233. }
  2234. }
  2235. if (dirty & WM_DIRTY_FBC) {
  2236. val = I915_READ(DISP_ARB_CTL);
  2237. if (results->enable_fbc_wm)
  2238. val &= ~DISP_FBC_WM_DIS;
  2239. else
  2240. val |= DISP_FBC_WM_DIS;
  2241. I915_WRITE(DISP_ARB_CTL, val);
  2242. }
  2243. if (dirty & WM_DIRTY_LP(1) &&
  2244. previous->wm_lp_spr[0] != results->wm_lp_spr[0])
  2245. I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
  2246. if (INTEL_INFO(dev)->gen >= 7) {
  2247. if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
  2248. I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
  2249. if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
  2250. I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
  2251. }
  2252. if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
  2253. I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
  2254. if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
  2255. I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
  2256. if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
  2257. I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
  2258. dev_priv->wm.hw = *results;
  2259. }
  2260. static bool ilk_disable_lp_wm(struct drm_device *dev)
  2261. {
  2262. struct drm_i915_private *dev_priv = dev->dev_private;
  2263. return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
  2264. }
  2265. static void ilk_update_wm(struct drm_crtc *crtc)
  2266. {
  2267. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2268. struct drm_device *dev = crtc->dev;
  2269. struct drm_i915_private *dev_priv = dev->dev_private;
  2270. struct ilk_wm_maximums max;
  2271. struct ilk_pipe_wm_parameters params = {};
  2272. struct ilk_wm_values results = {};
  2273. enum intel_ddb_partitioning partitioning;
  2274. struct intel_pipe_wm pipe_wm = {};
  2275. struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
  2276. struct intel_wm_config config = {};
  2277. ilk_compute_wm_parameters(crtc, &params);
  2278. intel_compute_pipe_wm(crtc, &params, &pipe_wm);
  2279. if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
  2280. return;
  2281. intel_crtc->wm.active = pipe_wm;
  2282. ilk_compute_wm_config(dev, &config);
  2283. ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
  2284. ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
  2285. /* 5/6 split only in single pipe config on IVB+ */
  2286. if (INTEL_INFO(dev)->gen >= 7 &&
  2287. config.num_pipes_active == 1 && config.sprites_enabled) {
  2288. ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
  2289. ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
  2290. best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
  2291. } else {
  2292. best_lp_wm = &lp_wm_1_2;
  2293. }
  2294. partitioning = (best_lp_wm == &lp_wm_1_2) ?
  2295. INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
  2296. ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
  2297. ilk_write_wm_values(dev_priv, &results);
  2298. }
  2299. static void ilk_update_sprite_wm(struct drm_plane *plane,
  2300. struct drm_crtc *crtc,
  2301. uint32_t sprite_width, int pixel_size,
  2302. bool enabled, bool scaled)
  2303. {
  2304. struct drm_device *dev = plane->dev;
  2305. struct intel_plane *intel_plane = to_intel_plane(plane);
  2306. intel_plane->wm.enabled = enabled;
  2307. intel_plane->wm.scaled = scaled;
  2308. intel_plane->wm.horiz_pixels = sprite_width;
  2309. intel_plane->wm.bytes_per_pixel = pixel_size;
  2310. /*
  2311. * IVB workaround: must disable low power watermarks for at least
  2312. * one frame before enabling scaling. LP watermarks can be re-enabled
  2313. * when scaling is disabled.
  2314. *
  2315. * WaCxSRDisabledForSpriteScaling:ivb
  2316. */
  2317. if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
  2318. intel_wait_for_vblank(dev, intel_plane->pipe);
  2319. ilk_update_wm(crtc);
  2320. }
  2321. static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
  2322. {
  2323. struct drm_device *dev = crtc->dev;
  2324. struct drm_i915_private *dev_priv = dev->dev_private;
  2325. struct ilk_wm_values *hw = &dev_priv->wm.hw;
  2326. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2327. struct intel_pipe_wm *active = &intel_crtc->wm.active;
  2328. enum pipe pipe = intel_crtc->pipe;
  2329. static const unsigned int wm0_pipe_reg[] = {
  2330. [PIPE_A] = WM0_PIPEA_ILK,
  2331. [PIPE_B] = WM0_PIPEB_ILK,
  2332. [PIPE_C] = WM0_PIPEC_IVB,
  2333. };
  2334. hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
  2335. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2336. hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
  2337. active->pipe_enabled = intel_crtc_active(crtc);
  2338. if (active->pipe_enabled) {
  2339. u32 tmp = hw->wm_pipe[pipe];
  2340. /*
  2341. * For active pipes LP0 watermark is marked as
  2342. * enabled, and LP1+ watermaks as disabled since
  2343. * we can't really reverse compute them in case
  2344. * multiple pipes are active.
  2345. */
  2346. active->wm[0].enable = true;
  2347. active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
  2348. active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
  2349. active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
  2350. active->linetime = hw->wm_linetime[pipe];
  2351. } else {
  2352. int level, max_level = ilk_wm_max_level(dev);
  2353. /*
  2354. * For inactive pipes, all watermark levels
  2355. * should be marked as enabled but zeroed,
  2356. * which is what we'd compute them to.
  2357. */
  2358. for (level = 0; level <= max_level; level++)
  2359. active->wm[level].enable = true;
  2360. }
  2361. }
  2362. void ilk_wm_get_hw_state(struct drm_device *dev)
  2363. {
  2364. struct drm_i915_private *dev_priv = dev->dev_private;
  2365. struct ilk_wm_values *hw = &dev_priv->wm.hw;
  2366. struct drm_crtc *crtc;
  2367. for_each_crtc(dev, crtc)
  2368. ilk_pipe_wm_get_hw_state(crtc);
  2369. hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
  2370. hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
  2371. hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
  2372. hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
  2373. if (INTEL_INFO(dev)->gen >= 7) {
  2374. hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
  2375. hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
  2376. }
  2377. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2378. hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
  2379. INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
  2380. else if (IS_IVYBRIDGE(dev))
  2381. hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
  2382. INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
  2383. hw->enable_fbc_wm =
  2384. !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
  2385. }
  2386. /**
  2387. * intel_update_watermarks - update FIFO watermark values based on current modes
  2388. *
  2389. * Calculate watermark values for the various WM regs based on current mode
  2390. * and plane configuration.
  2391. *
  2392. * There are several cases to deal with here:
  2393. * - normal (i.e. non-self-refresh)
  2394. * - self-refresh (SR) mode
  2395. * - lines are large relative to FIFO size (buffer can hold up to 2)
  2396. * - lines are small relative to FIFO size (buffer can hold more than 2
  2397. * lines), so need to account for TLB latency
  2398. *
  2399. * The normal calculation is:
  2400. * watermark = dotclock * bytes per pixel * latency
  2401. * where latency is platform & configuration dependent (we assume pessimal
  2402. * values here).
  2403. *
  2404. * The SR calculation is:
  2405. * watermark = (trunc(latency/line time)+1) * surface width *
  2406. * bytes per pixel
  2407. * where
  2408. * line time = htotal / dotclock
  2409. * surface width = hdisplay for normal plane and 64 for cursor
  2410. * and latency is assumed to be high, as above.
  2411. *
  2412. * The final value programmed to the register should always be rounded up,
  2413. * and include an extra 2 entries to account for clock crossings.
  2414. *
  2415. * We don't use the sprite, so we can ignore that. And on Crestline we have
  2416. * to set the non-SR watermarks to 8.
  2417. */
  2418. void intel_update_watermarks(struct drm_crtc *crtc)
  2419. {
  2420. struct drm_i915_private *dev_priv = crtc->dev->dev_private;
  2421. if (dev_priv->display.update_wm)
  2422. dev_priv->display.update_wm(crtc);
  2423. }
  2424. void intel_update_sprite_watermarks(struct drm_plane *plane,
  2425. struct drm_crtc *crtc,
  2426. uint32_t sprite_width, int pixel_size,
  2427. bool enabled, bool scaled)
  2428. {
  2429. struct drm_i915_private *dev_priv = plane->dev->dev_private;
  2430. if (dev_priv->display.update_sprite_wm)
  2431. dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
  2432. pixel_size, enabled, scaled);
  2433. }
  2434. static struct drm_i915_gem_object *
  2435. intel_alloc_context_page(struct drm_device *dev)
  2436. {
  2437. struct drm_i915_gem_object *ctx;
  2438. int ret;
  2439. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  2440. ctx = i915_gem_alloc_object(dev, 4096);
  2441. if (!ctx) {
  2442. DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
  2443. return NULL;
  2444. }
  2445. ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
  2446. if (ret) {
  2447. DRM_ERROR("failed to pin power context: %d\n", ret);
  2448. goto err_unref;
  2449. }
  2450. ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
  2451. if (ret) {
  2452. DRM_ERROR("failed to set-domain on power context: %d\n", ret);
  2453. goto err_unpin;
  2454. }
  2455. return ctx;
  2456. err_unpin:
  2457. i915_gem_object_ggtt_unpin(ctx);
  2458. err_unref:
  2459. drm_gem_object_unreference(&ctx->base);
  2460. return NULL;
  2461. }
  2462. /**
  2463. * Lock protecting IPS related data structures
  2464. */
  2465. DEFINE_SPINLOCK(mchdev_lock);
  2466. /* Global for IPS driver to get at the current i915 device. Protected by
  2467. * mchdev_lock. */
  2468. static struct drm_i915_private *i915_mch_dev;
  2469. bool ironlake_set_drps(struct drm_device *dev, u8 val)
  2470. {
  2471. struct drm_i915_private *dev_priv = dev->dev_private;
  2472. u16 rgvswctl;
  2473. assert_spin_locked(&mchdev_lock);
  2474. rgvswctl = I915_READ16(MEMSWCTL);
  2475. if (rgvswctl & MEMCTL_CMD_STS) {
  2476. DRM_DEBUG("gpu busy, RCS change rejected\n");
  2477. return false; /* still busy with another command */
  2478. }
  2479. rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
  2480. (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
  2481. I915_WRITE16(MEMSWCTL, rgvswctl);
  2482. POSTING_READ16(MEMSWCTL);
  2483. rgvswctl |= MEMCTL_CMD_STS;
  2484. I915_WRITE16(MEMSWCTL, rgvswctl);
  2485. return true;
  2486. }
  2487. static void ironlake_enable_drps(struct drm_device *dev)
  2488. {
  2489. struct drm_i915_private *dev_priv = dev->dev_private;
  2490. u32 rgvmodectl = I915_READ(MEMMODECTL);
  2491. u8 fmax, fmin, fstart, vstart;
  2492. spin_lock_irq(&mchdev_lock);
  2493. /* Enable temp reporting */
  2494. I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
  2495. I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
  2496. /* 100ms RC evaluation intervals */
  2497. I915_WRITE(RCUPEI, 100000);
  2498. I915_WRITE(RCDNEI, 100000);
  2499. /* Set max/min thresholds to 90ms and 80ms respectively */
  2500. I915_WRITE(RCBMAXAVG, 90000);
  2501. I915_WRITE(RCBMINAVG, 80000);
  2502. I915_WRITE(MEMIHYST, 1);
  2503. /* Set up min, max, and cur for interrupt handling */
  2504. fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
  2505. fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
  2506. fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
  2507. MEMMODE_FSTART_SHIFT;
  2508. vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
  2509. PXVFREQ_PX_SHIFT;
  2510. dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
  2511. dev_priv->ips.fstart = fstart;
  2512. dev_priv->ips.max_delay = fstart;
  2513. dev_priv->ips.min_delay = fmin;
  2514. dev_priv->ips.cur_delay = fstart;
  2515. DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
  2516. fmax, fmin, fstart);
  2517. I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
  2518. /*
  2519. * Interrupts will be enabled in ironlake_irq_postinstall
  2520. */
  2521. I915_WRITE(VIDSTART, vstart);
  2522. POSTING_READ(VIDSTART);
  2523. rgvmodectl |= MEMMODE_SWMODE_EN;
  2524. I915_WRITE(MEMMODECTL, rgvmodectl);
  2525. if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
  2526. DRM_ERROR("stuck trying to change perf mode\n");
  2527. mdelay(1);
  2528. ironlake_set_drps(dev, fstart);
  2529. dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
  2530. I915_READ(0x112e0);
  2531. dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
  2532. dev_priv->ips.last_count2 = I915_READ(0x112f4);
  2533. getrawmonotonic(&dev_priv->ips.last_time2);
  2534. spin_unlock_irq(&mchdev_lock);
  2535. }
  2536. static void ironlake_disable_drps(struct drm_device *dev)
  2537. {
  2538. struct drm_i915_private *dev_priv = dev->dev_private;
  2539. u16 rgvswctl;
  2540. spin_lock_irq(&mchdev_lock);
  2541. rgvswctl = I915_READ16(MEMSWCTL);
  2542. /* Ack interrupts, disable EFC interrupt */
  2543. I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
  2544. I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
  2545. I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
  2546. I915_WRITE(DEIIR, DE_PCU_EVENT);
  2547. I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
  2548. /* Go back to the starting frequency */
  2549. ironlake_set_drps(dev, dev_priv->ips.fstart);
  2550. mdelay(1);
  2551. rgvswctl |= MEMCTL_CMD_STS;
  2552. I915_WRITE(MEMSWCTL, rgvswctl);
  2553. mdelay(1);
  2554. spin_unlock_irq(&mchdev_lock);
  2555. }
  2556. /* There's a funny hw issue where the hw returns all 0 when reading from
  2557. * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
  2558. * ourselves, instead of doing a rmw cycle (which might result in us clearing
  2559. * all limits and the gpu stuck at whatever frequency it is at atm).
  2560. */
  2561. static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
  2562. {
  2563. u32 limits;
  2564. /* Only set the down limit when we've reached the lowest level to avoid
  2565. * getting more interrupts, otherwise leave this clear. This prevents a
  2566. * race in the hw when coming out of rc6: There's a tiny window where
  2567. * the hw runs at the minimal clock before selecting the desired
  2568. * frequency, if the down threshold expires in that window we will not
  2569. * receive a down interrupt. */
  2570. limits = dev_priv->rps.max_freq_softlimit << 24;
  2571. if (val <= dev_priv->rps.min_freq_softlimit)
  2572. limits |= dev_priv->rps.min_freq_softlimit << 16;
  2573. return limits;
  2574. }
  2575. static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
  2576. {
  2577. int new_power;
  2578. new_power = dev_priv->rps.power;
  2579. switch (dev_priv->rps.power) {
  2580. case LOW_POWER:
  2581. if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
  2582. new_power = BETWEEN;
  2583. break;
  2584. case BETWEEN:
  2585. if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
  2586. new_power = LOW_POWER;
  2587. else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
  2588. new_power = HIGH_POWER;
  2589. break;
  2590. case HIGH_POWER:
  2591. if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
  2592. new_power = BETWEEN;
  2593. break;
  2594. }
  2595. /* Max/min bins are special */
  2596. if (val == dev_priv->rps.min_freq_softlimit)
  2597. new_power = LOW_POWER;
  2598. if (val == dev_priv->rps.max_freq_softlimit)
  2599. new_power = HIGH_POWER;
  2600. if (new_power == dev_priv->rps.power)
  2601. return;
  2602. /* Note the units here are not exactly 1us, but 1280ns. */
  2603. switch (new_power) {
  2604. case LOW_POWER:
  2605. /* Upclock if more than 95% busy over 16ms */
  2606. I915_WRITE(GEN6_RP_UP_EI, 12500);
  2607. I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
  2608. /* Downclock if less than 85% busy over 32ms */
  2609. I915_WRITE(GEN6_RP_DOWN_EI, 25000);
  2610. I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
  2611. I915_WRITE(GEN6_RP_CONTROL,
  2612. GEN6_RP_MEDIA_TURBO |
  2613. GEN6_RP_MEDIA_HW_NORMAL_MODE |
  2614. GEN6_RP_MEDIA_IS_GFX |
  2615. GEN6_RP_ENABLE |
  2616. GEN6_RP_UP_BUSY_AVG |
  2617. GEN6_RP_DOWN_IDLE_AVG);
  2618. break;
  2619. case BETWEEN:
  2620. /* Upclock if more than 90% busy over 13ms */
  2621. I915_WRITE(GEN6_RP_UP_EI, 10250);
  2622. I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
  2623. /* Downclock if less than 75% busy over 32ms */
  2624. I915_WRITE(GEN6_RP_DOWN_EI, 25000);
  2625. I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
  2626. I915_WRITE(GEN6_RP_CONTROL,
  2627. GEN6_RP_MEDIA_TURBO |
  2628. GEN6_RP_MEDIA_HW_NORMAL_MODE |
  2629. GEN6_RP_MEDIA_IS_GFX |
  2630. GEN6_RP_ENABLE |
  2631. GEN6_RP_UP_BUSY_AVG |
  2632. GEN6_RP_DOWN_IDLE_AVG);
  2633. break;
  2634. case HIGH_POWER:
  2635. /* Upclock if more than 85% busy over 10ms */
  2636. I915_WRITE(GEN6_RP_UP_EI, 8000);
  2637. I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
  2638. /* Downclock if less than 60% busy over 32ms */
  2639. I915_WRITE(GEN6_RP_DOWN_EI, 25000);
  2640. I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
  2641. I915_WRITE(GEN6_RP_CONTROL,
  2642. GEN6_RP_MEDIA_TURBO |
  2643. GEN6_RP_MEDIA_HW_NORMAL_MODE |
  2644. GEN6_RP_MEDIA_IS_GFX |
  2645. GEN6_RP_ENABLE |
  2646. GEN6_RP_UP_BUSY_AVG |
  2647. GEN6_RP_DOWN_IDLE_AVG);
  2648. break;
  2649. }
  2650. dev_priv->rps.power = new_power;
  2651. dev_priv->rps.last_adj = 0;
  2652. }
  2653. static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
  2654. {
  2655. u32 mask = 0;
  2656. if (val > dev_priv->rps.min_freq_softlimit)
  2657. mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
  2658. if (val < dev_priv->rps.max_freq_softlimit)
  2659. mask |= GEN6_PM_RP_UP_THRESHOLD;
  2660. /* IVB and SNB hard hangs on looping batchbuffer
  2661. * if GEN6_PM_UP_EI_EXPIRED is masked.
  2662. */
  2663. if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
  2664. mask |= GEN6_PM_RP_UP_EI_EXPIRED;
  2665. if (IS_GEN8(dev_priv->dev))
  2666. mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
  2667. return ~mask;
  2668. }
  2669. /* gen6_set_rps is called to update the frequency request, but should also be
  2670. * called when the range (min_delay and max_delay) is modified so that we can
  2671. * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
  2672. void gen6_set_rps(struct drm_device *dev, u8 val)
  2673. {
  2674. struct drm_i915_private *dev_priv = dev->dev_private;
  2675. WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  2676. WARN_ON(val > dev_priv->rps.max_freq_softlimit);
  2677. WARN_ON(val < dev_priv->rps.min_freq_softlimit);
  2678. /* min/max delay may still have been modified so be sure to
  2679. * write the limits value.
  2680. */
  2681. if (val != dev_priv->rps.cur_freq) {
  2682. gen6_set_rps_thresholds(dev_priv, val);
  2683. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2684. I915_WRITE(GEN6_RPNSWREQ,
  2685. HSW_FREQUENCY(val));
  2686. else
  2687. I915_WRITE(GEN6_RPNSWREQ,
  2688. GEN6_FREQUENCY(val) |
  2689. GEN6_OFFSET(0) |
  2690. GEN6_AGGRESSIVE_TURBO);
  2691. }
  2692. /* Make sure we continue to get interrupts
  2693. * until we hit the minimum or maximum frequencies.
  2694. */
  2695. I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
  2696. I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
  2697. POSTING_READ(GEN6_RPNSWREQ);
  2698. dev_priv->rps.cur_freq = val;
  2699. trace_intel_gpu_freq_change(val * 50);
  2700. }
  2701. /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
  2702. *
  2703. * * If Gfx is Idle, then
  2704. * 1. Mask Turbo interrupts
  2705. * 2. Bring up Gfx clock
  2706. * 3. Change the freq to Rpn and wait till P-Unit updates freq
  2707. * 4. Clear the Force GFX CLK ON bit so that Gfx can down
  2708. * 5. Unmask Turbo interrupts
  2709. */
  2710. static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
  2711. {
  2712. struct drm_device *dev = dev_priv->dev;
  2713. /* Latest VLV doesn't need to force the gfx clock */
  2714. if (dev->pdev->revision >= 0xd) {
  2715. valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
  2716. return;
  2717. }
  2718. /*
  2719. * When we are idle. Drop to min voltage state.
  2720. */
  2721. if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
  2722. return;
  2723. /* Mask turbo interrupt so that they will not come in between */
  2724. I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
  2725. vlv_force_gfx_clock(dev_priv, true);
  2726. dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
  2727. vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
  2728. dev_priv->rps.min_freq_softlimit);
  2729. if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
  2730. & GENFREQSTATUS) == 0, 5))
  2731. DRM_ERROR("timed out waiting for Punit\n");
  2732. vlv_force_gfx_clock(dev_priv, false);
  2733. I915_WRITE(GEN6_PMINTRMSK,
  2734. gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
  2735. }
  2736. void gen6_rps_idle(struct drm_i915_private *dev_priv)
  2737. {
  2738. struct drm_device *dev = dev_priv->dev;
  2739. mutex_lock(&dev_priv->rps.hw_lock);
  2740. if (dev_priv->rps.enabled) {
  2741. if (IS_VALLEYVIEW(dev))
  2742. vlv_set_rps_idle(dev_priv);
  2743. else
  2744. gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
  2745. dev_priv->rps.last_adj = 0;
  2746. }
  2747. mutex_unlock(&dev_priv->rps.hw_lock);
  2748. }
  2749. void gen6_rps_boost(struct drm_i915_private *dev_priv)
  2750. {
  2751. struct drm_device *dev = dev_priv->dev;
  2752. mutex_lock(&dev_priv->rps.hw_lock);
  2753. if (dev_priv->rps.enabled) {
  2754. if (IS_VALLEYVIEW(dev))
  2755. valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
  2756. else
  2757. gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
  2758. dev_priv->rps.last_adj = 0;
  2759. }
  2760. mutex_unlock(&dev_priv->rps.hw_lock);
  2761. }
  2762. void valleyview_set_rps(struct drm_device *dev, u8 val)
  2763. {
  2764. struct drm_i915_private *dev_priv = dev->dev_private;
  2765. WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  2766. WARN_ON(val > dev_priv->rps.max_freq_softlimit);
  2767. WARN_ON(val < dev_priv->rps.min_freq_softlimit);
  2768. DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
  2769. vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
  2770. dev_priv->rps.cur_freq,
  2771. vlv_gpu_freq(dev_priv, val), val);
  2772. if (val != dev_priv->rps.cur_freq)
  2773. vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
  2774. I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
  2775. dev_priv->rps.cur_freq = val;
  2776. trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
  2777. }
  2778. static void gen8_disable_rps_interrupts(struct drm_device *dev)
  2779. {
  2780. struct drm_i915_private *dev_priv = dev->dev_private;
  2781. I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
  2782. I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
  2783. ~dev_priv->pm_rps_events);
  2784. /* Complete PM interrupt masking here doesn't race with the rps work
  2785. * item again unmasking PM interrupts because that is using a different
  2786. * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
  2787. * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
  2788. * gen8_enable_rps will clean up. */
  2789. spin_lock_irq(&dev_priv->irq_lock);
  2790. dev_priv->rps.pm_iir = 0;
  2791. spin_unlock_irq(&dev_priv->irq_lock);
  2792. I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
  2793. }
  2794. static void gen6_disable_rps_interrupts(struct drm_device *dev)
  2795. {
  2796. struct drm_i915_private *dev_priv = dev->dev_private;
  2797. I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
  2798. I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
  2799. ~dev_priv->pm_rps_events);
  2800. /* Complete PM interrupt masking here doesn't race with the rps work
  2801. * item again unmasking PM interrupts because that is using a different
  2802. * register (PMIMR) to mask PM interrupts. The only risk is in leaving
  2803. * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
  2804. spin_lock_irq(&dev_priv->irq_lock);
  2805. dev_priv->rps.pm_iir = 0;
  2806. spin_unlock_irq(&dev_priv->irq_lock);
  2807. I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
  2808. }
  2809. static void gen6_disable_rps(struct drm_device *dev)
  2810. {
  2811. struct drm_i915_private *dev_priv = dev->dev_private;
  2812. I915_WRITE(GEN6_RC_CONTROL, 0);
  2813. I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
  2814. if (IS_BROADWELL(dev))
  2815. gen8_disable_rps_interrupts(dev);
  2816. else
  2817. gen6_disable_rps_interrupts(dev);
  2818. }
  2819. static void valleyview_disable_rps(struct drm_device *dev)
  2820. {
  2821. struct drm_i915_private *dev_priv = dev->dev_private;
  2822. I915_WRITE(GEN6_RC_CONTROL, 0);
  2823. gen6_disable_rps_interrupts(dev);
  2824. }
  2825. static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
  2826. {
  2827. if (IS_VALLEYVIEW(dev)) {
  2828. if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
  2829. mode = GEN6_RC_CTL_RC6_ENABLE;
  2830. else
  2831. mode = 0;
  2832. }
  2833. DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
  2834. (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
  2835. (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
  2836. (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
  2837. }
  2838. static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
  2839. {
  2840. /* No RC6 before Ironlake */
  2841. if (INTEL_INFO(dev)->gen < 5)
  2842. return 0;
  2843. /* RC6 is only on Ironlake mobile not on desktop */
  2844. if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
  2845. return 0;
  2846. /* Respect the kernel parameter if it is set */
  2847. if (enable_rc6 >= 0) {
  2848. int mask;
  2849. if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
  2850. mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
  2851. INTEL_RC6pp_ENABLE;
  2852. else
  2853. mask = INTEL_RC6_ENABLE;
  2854. if ((enable_rc6 & mask) != enable_rc6)
  2855. DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
  2856. enable_rc6 & mask, enable_rc6, mask);
  2857. return enable_rc6 & mask;
  2858. }
  2859. /* Disable RC6 on Ironlake */
  2860. if (INTEL_INFO(dev)->gen == 5)
  2861. return 0;
  2862. if (IS_IVYBRIDGE(dev))
  2863. return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
  2864. return INTEL_RC6_ENABLE;
  2865. }
  2866. int intel_enable_rc6(const struct drm_device *dev)
  2867. {
  2868. return i915.enable_rc6;
  2869. }
  2870. static void gen8_enable_rps_interrupts(struct drm_device *dev)
  2871. {
  2872. struct drm_i915_private *dev_priv = dev->dev_private;
  2873. spin_lock_irq(&dev_priv->irq_lock);
  2874. WARN_ON(dev_priv->rps.pm_iir);
  2875. bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  2876. I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
  2877. spin_unlock_irq(&dev_priv->irq_lock);
  2878. }
  2879. static void gen6_enable_rps_interrupts(struct drm_device *dev)
  2880. {
  2881. struct drm_i915_private *dev_priv = dev->dev_private;
  2882. spin_lock_irq(&dev_priv->irq_lock);
  2883. WARN_ON(dev_priv->rps.pm_iir);
  2884. snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
  2885. I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
  2886. spin_unlock_irq(&dev_priv->irq_lock);
  2887. }
  2888. static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
  2889. {
  2890. /* All of these values are in units of 50MHz */
  2891. dev_priv->rps.cur_freq = 0;
  2892. /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
  2893. dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
  2894. dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
  2895. dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
  2896. /* XXX: only BYT has a special efficient freq */
  2897. dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
  2898. /* hw_max = RP0 until we check for overclocking */
  2899. dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
  2900. /* Preserve min/max settings in case of re-init */
  2901. if (dev_priv->rps.max_freq_softlimit == 0)
  2902. dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
  2903. if (dev_priv->rps.min_freq_softlimit == 0)
  2904. dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
  2905. }
  2906. static void gen8_enable_rps(struct drm_device *dev)
  2907. {
  2908. struct drm_i915_private *dev_priv = dev->dev_private;
  2909. struct intel_engine_cs *ring;
  2910. uint32_t rc6_mask = 0, rp_state_cap;
  2911. int unused;
  2912. /* 1a: Software RC state - RC0 */
  2913. I915_WRITE(GEN6_RC_STATE, 0);
  2914. /* 1c & 1d: Get forcewake during program sequence. Although the driver
  2915. * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
  2916. gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  2917. /* 2a: Disable RC states. */
  2918. I915_WRITE(GEN6_RC_CONTROL, 0);
  2919. rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
  2920. parse_rp_state_cap(dev_priv, rp_state_cap);
  2921. /* 2b: Program RC6 thresholds.*/
  2922. I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
  2923. I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
  2924. I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
  2925. for_each_ring(ring, dev_priv, unused)
  2926. I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
  2927. I915_WRITE(GEN6_RC_SLEEP, 0);
  2928. I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
  2929. /* 3: Enable RC6 */
  2930. if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
  2931. rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
  2932. intel_print_rc6_info(dev, rc6_mask);
  2933. I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
  2934. GEN6_RC_CTL_EI_MODE(1) |
  2935. rc6_mask);
  2936. /* 4 Program defaults and thresholds for RPS*/
  2937. I915_WRITE(GEN6_RPNSWREQ,
  2938. HSW_FREQUENCY(dev_priv->rps.rp1_freq));
  2939. I915_WRITE(GEN6_RC_VIDEO_FREQ,
  2940. HSW_FREQUENCY(dev_priv->rps.rp1_freq));
  2941. /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
  2942. I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
  2943. /* Docs recommend 900MHz, and 300 MHz respectively */
  2944. I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
  2945. dev_priv->rps.max_freq_softlimit << 24 |
  2946. dev_priv->rps.min_freq_softlimit << 16);
  2947. I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
  2948. I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
  2949. I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
  2950. I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
  2951. I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
  2952. /* 5: Enable RPS */
  2953. I915_WRITE(GEN6_RP_CONTROL,
  2954. GEN6_RP_MEDIA_TURBO |
  2955. GEN6_RP_MEDIA_HW_NORMAL_MODE |
  2956. GEN6_RP_MEDIA_IS_GFX |
  2957. GEN6_RP_ENABLE |
  2958. GEN6_RP_UP_BUSY_AVG |
  2959. GEN6_RP_DOWN_IDLE_AVG);
  2960. /* 6: Ring frequency + overclocking (our driver does this later */
  2961. gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
  2962. gen8_enable_rps_interrupts(dev);
  2963. gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  2964. }
  2965. static void gen6_enable_rps(struct drm_device *dev)
  2966. {
  2967. struct drm_i915_private *dev_priv = dev->dev_private;
  2968. struct intel_engine_cs *ring;
  2969. u32 rp_state_cap;
  2970. u32 gt_perf_status;
  2971. u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
  2972. u32 gtfifodbg;
  2973. int rc6_mode;
  2974. int i, ret;
  2975. WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  2976. /* Here begins a magic sequence of register writes to enable
  2977. * auto-downclocking.
  2978. *
  2979. * Perhaps there might be some value in exposing these to
  2980. * userspace...
  2981. */
  2982. I915_WRITE(GEN6_RC_STATE, 0);
  2983. /* Clear the DBG now so we don't confuse earlier errors */
  2984. if ((gtfifodbg = I915_READ(GTFIFODBG))) {
  2985. DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
  2986. I915_WRITE(GTFIFODBG, gtfifodbg);
  2987. }
  2988. gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  2989. rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
  2990. gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
  2991. parse_rp_state_cap(dev_priv, rp_state_cap);
  2992. /* disable the counters and set deterministic thresholds */
  2993. I915_WRITE(GEN6_RC_CONTROL, 0);
  2994. I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
  2995. I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
  2996. I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
  2997. I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
  2998. I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
  2999. for_each_ring(ring, dev_priv, i)
  3000. I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
  3001. I915_WRITE(GEN6_RC_SLEEP, 0);
  3002. I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
  3003. if (IS_IVYBRIDGE(dev))
  3004. I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
  3005. else
  3006. I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
  3007. I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
  3008. I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
  3009. /* Check if we are enabling RC6 */
  3010. rc6_mode = intel_enable_rc6(dev_priv->dev);
  3011. if (rc6_mode & INTEL_RC6_ENABLE)
  3012. rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
  3013. /* We don't use those on Haswell */
  3014. if (!IS_HASWELL(dev)) {
  3015. if (rc6_mode & INTEL_RC6p_ENABLE)
  3016. rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
  3017. if (rc6_mode & INTEL_RC6pp_ENABLE)
  3018. rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
  3019. }
  3020. intel_print_rc6_info(dev, rc6_mask);
  3021. I915_WRITE(GEN6_RC_CONTROL,
  3022. rc6_mask |
  3023. GEN6_RC_CTL_EI_MODE(1) |
  3024. GEN6_RC_CTL_HW_ENABLE);
  3025. /* Power down if completely idle for over 50ms */
  3026. I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
  3027. I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
  3028. ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
  3029. if (ret)
  3030. DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
  3031. ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
  3032. if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
  3033. DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
  3034. (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
  3035. (pcu_mbox & 0xff) * 50);
  3036. dev_priv->rps.max_freq = pcu_mbox & 0xff;
  3037. }
  3038. dev_priv->rps.power = HIGH_POWER; /* force a reset */
  3039. gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
  3040. gen6_enable_rps_interrupts(dev);
  3041. rc6vids = 0;
  3042. ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
  3043. if (IS_GEN6(dev) && ret) {
  3044. DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
  3045. } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
  3046. DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
  3047. GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
  3048. rc6vids &= 0xffff00;
  3049. rc6vids |= GEN6_ENCODE_RC6_VID(450);
  3050. ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
  3051. if (ret)
  3052. DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
  3053. }
  3054. gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  3055. }
  3056. static void __gen6_update_ring_freq(struct drm_device *dev)
  3057. {
  3058. struct drm_i915_private *dev_priv = dev->dev_private;
  3059. int min_freq = 15;
  3060. unsigned int gpu_freq;
  3061. unsigned int max_ia_freq, min_ring_freq;
  3062. int scaling_factor = 180;
  3063. struct cpufreq_policy *policy;
  3064. WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  3065. policy = cpufreq_cpu_get(0);
  3066. if (policy) {
  3067. max_ia_freq = policy->cpuinfo.max_freq;
  3068. cpufreq_cpu_put(policy);
  3069. } else {
  3070. /*
  3071. * Default to measured freq if none found, PCU will ensure we
  3072. * don't go over
  3073. */
  3074. max_ia_freq = tsc_khz;
  3075. }
  3076. /* Convert from kHz to MHz */
  3077. max_ia_freq /= 1000;
  3078. min_ring_freq = I915_READ(DCLK) & 0xf;
  3079. /* convert DDR frequency from units of 266.6MHz to bandwidth */
  3080. min_ring_freq = mult_frac(min_ring_freq, 8, 3);
  3081. /*
  3082. * For each potential GPU frequency, load a ring frequency we'd like
  3083. * to use for memory access. We do this by specifying the IA frequency
  3084. * the PCU should use as a reference to determine the ring frequency.
  3085. */
  3086. for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
  3087. gpu_freq--) {
  3088. int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
  3089. unsigned int ia_freq = 0, ring_freq = 0;
  3090. if (INTEL_INFO(dev)->gen >= 8) {
  3091. /* max(2 * GT, DDR). NB: GT is 50MHz units */
  3092. ring_freq = max(min_ring_freq, gpu_freq);
  3093. } else if (IS_HASWELL(dev)) {
  3094. ring_freq = mult_frac(gpu_freq, 5, 4);
  3095. ring_freq = max(min_ring_freq, ring_freq);
  3096. /* leave ia_freq as the default, chosen by cpufreq */
  3097. } else {
  3098. /* On older processors, there is no separate ring
  3099. * clock domain, so in order to boost the bandwidth
  3100. * of the ring, we need to upclock the CPU (ia_freq).
  3101. *
  3102. * For GPU frequencies less than 750MHz,
  3103. * just use the lowest ring freq.
  3104. */
  3105. if (gpu_freq < min_freq)
  3106. ia_freq = 800;
  3107. else
  3108. ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
  3109. ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
  3110. }
  3111. sandybridge_pcode_write(dev_priv,
  3112. GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
  3113. ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
  3114. ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
  3115. gpu_freq);
  3116. }
  3117. }
  3118. void gen6_update_ring_freq(struct drm_device *dev)
  3119. {
  3120. struct drm_i915_private *dev_priv = dev->dev_private;
  3121. if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
  3122. return;
  3123. mutex_lock(&dev_priv->rps.hw_lock);
  3124. __gen6_update_ring_freq(dev);
  3125. mutex_unlock(&dev_priv->rps.hw_lock);
  3126. }
  3127. int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
  3128. {
  3129. u32 val, rp0;
  3130. val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
  3131. rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
  3132. /* Clamp to max */
  3133. rp0 = min_t(u32, rp0, 0xea);
  3134. return rp0;
  3135. }
  3136. static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
  3137. {
  3138. u32 val, rpe;
  3139. val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
  3140. rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
  3141. val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
  3142. rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
  3143. return rpe;
  3144. }
  3145. int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
  3146. {
  3147. return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
  3148. }
  3149. /* Check that the pctx buffer wasn't move under us. */
  3150. static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
  3151. {
  3152. unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
  3153. WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
  3154. dev_priv->vlv_pctx->stolen->start);
  3155. }
  3156. static void valleyview_setup_pctx(struct drm_device *dev)
  3157. {
  3158. struct drm_i915_private *dev_priv = dev->dev_private;
  3159. struct drm_i915_gem_object *pctx;
  3160. unsigned long pctx_paddr;
  3161. u32 pcbr;
  3162. int pctx_size = 24*1024;
  3163. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  3164. pcbr = I915_READ(VLV_PCBR);
  3165. if (pcbr) {
  3166. /* BIOS set it up already, grab the pre-alloc'd space */
  3167. int pcbr_offset;
  3168. pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
  3169. pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
  3170. pcbr_offset,
  3171. I915_GTT_OFFSET_NONE,
  3172. pctx_size);
  3173. goto out;
  3174. }
  3175. /*
  3176. * From the Gunit register HAS:
  3177. * The Gfx driver is expected to program this register and ensure
  3178. * proper allocation within Gfx stolen memory. For example, this
  3179. * register should be programmed such than the PCBR range does not
  3180. * overlap with other ranges, such as the frame buffer, protected
  3181. * memory, or any other relevant ranges.
  3182. */
  3183. pctx = i915_gem_object_create_stolen(dev, pctx_size);
  3184. if (!pctx) {
  3185. DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
  3186. return;
  3187. }
  3188. pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
  3189. I915_WRITE(VLV_PCBR, pctx_paddr);
  3190. out:
  3191. dev_priv->vlv_pctx = pctx;
  3192. }
  3193. static void valleyview_cleanup_pctx(struct drm_device *dev)
  3194. {
  3195. struct drm_i915_private *dev_priv = dev->dev_private;
  3196. if (WARN_ON(!dev_priv->vlv_pctx))
  3197. return;
  3198. drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
  3199. dev_priv->vlv_pctx = NULL;
  3200. }
  3201. static void valleyview_init_gt_powersave(struct drm_device *dev)
  3202. {
  3203. struct drm_i915_private *dev_priv = dev->dev_private;
  3204. valleyview_setup_pctx(dev);
  3205. mutex_lock(&dev_priv->rps.hw_lock);
  3206. dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
  3207. dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
  3208. DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
  3209. vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
  3210. dev_priv->rps.max_freq);
  3211. dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
  3212. DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
  3213. vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
  3214. dev_priv->rps.efficient_freq);
  3215. dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
  3216. DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
  3217. vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
  3218. dev_priv->rps.min_freq);
  3219. /* Preserve min/max settings in case of re-init */
  3220. if (dev_priv->rps.max_freq_softlimit == 0)
  3221. dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
  3222. if (dev_priv->rps.min_freq_softlimit == 0)
  3223. dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
  3224. mutex_unlock(&dev_priv->rps.hw_lock);
  3225. }
  3226. static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
  3227. {
  3228. valleyview_cleanup_pctx(dev);
  3229. }
  3230. static void valleyview_enable_rps(struct drm_device *dev)
  3231. {
  3232. struct drm_i915_private *dev_priv = dev->dev_private;
  3233. struct intel_engine_cs *ring;
  3234. u32 gtfifodbg, val, rc6_mode = 0;
  3235. int i;
  3236. WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  3237. valleyview_check_pctx(dev_priv);
  3238. if ((gtfifodbg = I915_READ(GTFIFODBG))) {
  3239. DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
  3240. gtfifodbg);
  3241. I915_WRITE(GTFIFODBG, gtfifodbg);
  3242. }
  3243. /* If VLV, Forcewake all wells, else re-direct to regular path */
  3244. gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  3245. I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
  3246. I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
  3247. I915_WRITE(GEN6_RP_UP_EI, 66000);
  3248. I915_WRITE(GEN6_RP_DOWN_EI, 350000);
  3249. I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
  3250. I915_WRITE(GEN6_RP_CONTROL,
  3251. GEN6_RP_MEDIA_TURBO |
  3252. GEN6_RP_MEDIA_HW_NORMAL_MODE |
  3253. GEN6_RP_MEDIA_IS_GFX |
  3254. GEN6_RP_ENABLE |
  3255. GEN6_RP_UP_BUSY_AVG |
  3256. GEN6_RP_DOWN_IDLE_CONT);
  3257. I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
  3258. I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
  3259. I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
  3260. for_each_ring(ring, dev_priv, i)
  3261. I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
  3262. I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
  3263. /* allows RC6 residency counter to work */
  3264. I915_WRITE(VLV_COUNTER_CONTROL,
  3265. _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
  3266. VLV_MEDIA_RC6_COUNT_EN |
  3267. VLV_RENDER_RC6_COUNT_EN));
  3268. if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
  3269. rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
  3270. intel_print_rc6_info(dev, rc6_mode);
  3271. I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
  3272. val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
  3273. DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
  3274. DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
  3275. dev_priv->rps.cur_freq = (val >> 8) & 0xff;
  3276. DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
  3277. vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
  3278. dev_priv->rps.cur_freq);
  3279. DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
  3280. vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
  3281. dev_priv->rps.efficient_freq);
  3282. valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
  3283. gen6_enable_rps_interrupts(dev);
  3284. gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  3285. }
  3286. void ironlake_teardown_rc6(struct drm_device *dev)
  3287. {
  3288. struct drm_i915_private *dev_priv = dev->dev_private;
  3289. if (dev_priv->ips.renderctx) {
  3290. i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
  3291. drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
  3292. dev_priv->ips.renderctx = NULL;
  3293. }
  3294. if (dev_priv->ips.pwrctx) {
  3295. i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
  3296. drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
  3297. dev_priv->ips.pwrctx = NULL;
  3298. }
  3299. }
  3300. static void ironlake_disable_rc6(struct drm_device *dev)
  3301. {
  3302. struct drm_i915_private *dev_priv = dev->dev_private;
  3303. if (I915_READ(PWRCTXA)) {
  3304. /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
  3305. I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
  3306. wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
  3307. 50);
  3308. I915_WRITE(PWRCTXA, 0);
  3309. POSTING_READ(PWRCTXA);
  3310. I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
  3311. POSTING_READ(RSTDBYCTL);
  3312. }
  3313. }
  3314. static int ironlake_setup_rc6(struct drm_device *dev)
  3315. {
  3316. struct drm_i915_private *dev_priv = dev->dev_private;
  3317. if (dev_priv->ips.renderctx == NULL)
  3318. dev_priv->ips.renderctx = intel_alloc_context_page(dev);
  3319. if (!dev_priv->ips.renderctx)
  3320. return -ENOMEM;
  3321. if (dev_priv->ips.pwrctx == NULL)
  3322. dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
  3323. if (!dev_priv->ips.pwrctx) {
  3324. ironlake_teardown_rc6(dev);
  3325. return -ENOMEM;
  3326. }
  3327. return 0;
  3328. }
  3329. static void ironlake_enable_rc6(struct drm_device *dev)
  3330. {
  3331. struct drm_i915_private *dev_priv = dev->dev_private;
  3332. struct intel_engine_cs *ring = &dev_priv->ring[RCS];
  3333. bool was_interruptible;
  3334. int ret;
  3335. /* rc6 disabled by default due to repeated reports of hanging during
  3336. * boot and resume.
  3337. */
  3338. if (!intel_enable_rc6(dev))
  3339. return;
  3340. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  3341. ret = ironlake_setup_rc6(dev);
  3342. if (ret)
  3343. return;
  3344. was_interruptible = dev_priv->mm.interruptible;
  3345. dev_priv->mm.interruptible = false;
  3346. /*
  3347. * GPU can automatically power down the render unit if given a page
  3348. * to save state.
  3349. */
  3350. ret = intel_ring_begin(ring, 6);
  3351. if (ret) {
  3352. ironlake_teardown_rc6(dev);
  3353. dev_priv->mm.interruptible = was_interruptible;
  3354. return;
  3355. }
  3356. intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
  3357. intel_ring_emit(ring, MI_SET_CONTEXT);
  3358. intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
  3359. MI_MM_SPACE_GTT |
  3360. MI_SAVE_EXT_STATE_EN |
  3361. MI_RESTORE_EXT_STATE_EN |
  3362. MI_RESTORE_INHIBIT);
  3363. intel_ring_emit(ring, MI_SUSPEND_FLUSH);
  3364. intel_ring_emit(ring, MI_NOOP);
  3365. intel_ring_emit(ring, MI_FLUSH);
  3366. intel_ring_advance(ring);
  3367. /*
  3368. * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
  3369. * does an implicit flush, combined with MI_FLUSH above, it should be
  3370. * safe to assume that renderctx is valid
  3371. */
  3372. ret = intel_ring_idle(ring);
  3373. dev_priv->mm.interruptible = was_interruptible;
  3374. if (ret) {
  3375. DRM_ERROR("failed to enable ironlake power savings\n");
  3376. ironlake_teardown_rc6(dev);
  3377. return;
  3378. }
  3379. I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
  3380. I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
  3381. intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
  3382. }
  3383. static unsigned long intel_pxfreq(u32 vidfreq)
  3384. {
  3385. unsigned long freq;
  3386. int div = (vidfreq & 0x3f0000) >> 16;
  3387. int post = (vidfreq & 0x3000) >> 12;
  3388. int pre = (vidfreq & 0x7);
  3389. if (!pre)
  3390. return 0;
  3391. freq = ((div * 133333) / ((1<<post) * pre));
  3392. return freq;
  3393. }
  3394. static const struct cparams {
  3395. u16 i;
  3396. u16 t;
  3397. u16 m;
  3398. u16 c;
  3399. } cparams[] = {
  3400. { 1, 1333, 301, 28664 },
  3401. { 1, 1066, 294, 24460 },
  3402. { 1, 800, 294, 25192 },
  3403. { 0, 1333, 276, 27605 },
  3404. { 0, 1066, 276, 27605 },
  3405. { 0, 800, 231, 23784 },
  3406. };
  3407. static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
  3408. {
  3409. u64 total_count, diff, ret;
  3410. u32 count1, count2, count3, m = 0, c = 0;
  3411. unsigned long now = jiffies_to_msecs(jiffies), diff1;
  3412. int i;
  3413. assert_spin_locked(&mchdev_lock);
  3414. diff1 = now - dev_priv->ips.last_time1;
  3415. /* Prevent division-by-zero if we are asking too fast.
  3416. * Also, we don't get interesting results if we are polling
  3417. * faster than once in 10ms, so just return the saved value
  3418. * in such cases.
  3419. */
  3420. if (diff1 <= 10)
  3421. return dev_priv->ips.chipset_power;
  3422. count1 = I915_READ(DMIEC);
  3423. count2 = I915_READ(DDREC);
  3424. count3 = I915_READ(CSIEC);
  3425. total_count = count1 + count2 + count3;
  3426. /* FIXME: handle per-counter overflow */
  3427. if (total_count < dev_priv->ips.last_count1) {
  3428. diff = ~0UL - dev_priv->ips.last_count1;
  3429. diff += total_count;
  3430. } else {
  3431. diff = total_count - dev_priv->ips.last_count1;
  3432. }
  3433. for (i = 0; i < ARRAY_SIZE(cparams); i++) {
  3434. if (cparams[i].i == dev_priv->ips.c_m &&
  3435. cparams[i].t == dev_priv->ips.r_t) {
  3436. m = cparams[i].m;
  3437. c = cparams[i].c;
  3438. break;
  3439. }
  3440. }
  3441. diff = div_u64(diff, diff1);
  3442. ret = ((m * diff) + c);
  3443. ret = div_u64(ret, 10);
  3444. dev_priv->ips.last_count1 = total_count;
  3445. dev_priv->ips.last_time1 = now;
  3446. dev_priv->ips.chipset_power = ret;
  3447. return ret;
  3448. }
  3449. unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
  3450. {
  3451. struct drm_device *dev = dev_priv->dev;
  3452. unsigned long val;
  3453. if (INTEL_INFO(dev)->gen != 5)
  3454. return 0;
  3455. spin_lock_irq(&mchdev_lock);
  3456. val = __i915_chipset_val(dev_priv);
  3457. spin_unlock_irq(&mchdev_lock);
  3458. return val;
  3459. }
  3460. unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
  3461. {
  3462. unsigned long m, x, b;
  3463. u32 tsfs;
  3464. tsfs = I915_READ(TSFS);
  3465. m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
  3466. x = I915_READ8(TR1);
  3467. b = tsfs & TSFS_INTR_MASK;
  3468. return ((m * x) / 127) - b;
  3469. }
  3470. static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
  3471. {
  3472. struct drm_device *dev = dev_priv->dev;
  3473. static const struct v_table {
  3474. u16 vd; /* in .1 mil */
  3475. u16 vm; /* in .1 mil */
  3476. } v_table[] = {
  3477. { 0, 0, },
  3478. { 375, 0, },
  3479. { 500, 0, },
  3480. { 625, 0, },
  3481. { 750, 0, },
  3482. { 875, 0, },
  3483. { 1000, 0, },
  3484. { 1125, 0, },
  3485. { 4125, 3000, },
  3486. { 4125, 3000, },
  3487. { 4125, 3000, },
  3488. { 4125, 3000, },
  3489. { 4125, 3000, },
  3490. { 4125, 3000, },
  3491. { 4125, 3000, },
  3492. { 4125, 3000, },
  3493. { 4125, 3000, },
  3494. { 4125, 3000, },
  3495. { 4125, 3000, },
  3496. { 4125, 3000, },
  3497. { 4125, 3000, },
  3498. { 4125, 3000, },
  3499. { 4125, 3000, },
  3500. { 4125, 3000, },
  3501. { 4125, 3000, },
  3502. { 4125, 3000, },
  3503. { 4125, 3000, },
  3504. { 4125, 3000, },
  3505. { 4125, 3000, },
  3506. { 4125, 3000, },
  3507. { 4125, 3000, },
  3508. { 4125, 3000, },
  3509. { 4250, 3125, },
  3510. { 4375, 3250, },
  3511. { 4500, 3375, },
  3512. { 4625, 3500, },
  3513. { 4750, 3625, },
  3514. { 4875, 3750, },
  3515. { 5000, 3875, },
  3516. { 5125, 4000, },
  3517. { 5250, 4125, },
  3518. { 5375, 4250, },
  3519. { 5500, 4375, },
  3520. { 5625, 4500, },
  3521. { 5750, 4625, },
  3522. { 5875, 4750, },
  3523. { 6000, 4875, },
  3524. { 6125, 5000, },
  3525. { 6250, 5125, },
  3526. { 6375, 5250, },
  3527. { 6500, 5375, },
  3528. { 6625, 5500, },
  3529. { 6750, 5625, },
  3530. { 6875, 5750, },
  3531. { 7000, 5875, },
  3532. { 7125, 6000, },
  3533. { 7250, 6125, },
  3534. { 7375, 6250, },
  3535. { 7500, 6375, },
  3536. { 7625, 6500, },
  3537. { 7750, 6625, },
  3538. { 7875, 6750, },
  3539. { 8000, 6875, },
  3540. { 8125, 7000, },
  3541. { 8250, 7125, },
  3542. { 8375, 7250, },
  3543. { 8500, 7375, },
  3544. { 8625, 7500, },
  3545. { 8750, 7625, },
  3546. { 8875, 7750, },
  3547. { 9000, 7875, },
  3548. { 9125, 8000, },
  3549. { 9250, 8125, },
  3550. { 9375, 8250, },
  3551. { 9500, 8375, },
  3552. { 9625, 8500, },
  3553. { 9750, 8625, },
  3554. { 9875, 8750, },
  3555. { 10000, 8875, },
  3556. { 10125, 9000, },
  3557. { 10250, 9125, },
  3558. { 10375, 9250, },
  3559. { 10500, 9375, },
  3560. { 10625, 9500, },
  3561. { 10750, 9625, },
  3562. { 10875, 9750, },
  3563. { 11000, 9875, },
  3564. { 11125, 10000, },
  3565. { 11250, 10125, },
  3566. { 11375, 10250, },
  3567. { 11500, 10375, },
  3568. { 11625, 10500, },
  3569. { 11750, 10625, },
  3570. { 11875, 10750, },
  3571. { 12000, 10875, },
  3572. { 12125, 11000, },
  3573. { 12250, 11125, },
  3574. { 12375, 11250, },
  3575. { 12500, 11375, },
  3576. { 12625, 11500, },
  3577. { 12750, 11625, },
  3578. { 12875, 11750, },
  3579. { 13000, 11875, },
  3580. { 13125, 12000, },
  3581. { 13250, 12125, },
  3582. { 13375, 12250, },
  3583. { 13500, 12375, },
  3584. { 13625, 12500, },
  3585. { 13750, 12625, },
  3586. { 13875, 12750, },
  3587. { 14000, 12875, },
  3588. { 14125, 13000, },
  3589. { 14250, 13125, },
  3590. { 14375, 13250, },
  3591. { 14500, 13375, },
  3592. { 14625, 13500, },
  3593. { 14750, 13625, },
  3594. { 14875, 13750, },
  3595. { 15000, 13875, },
  3596. { 15125, 14000, },
  3597. { 15250, 14125, },
  3598. { 15375, 14250, },
  3599. { 15500, 14375, },
  3600. { 15625, 14500, },
  3601. { 15750, 14625, },
  3602. { 15875, 14750, },
  3603. { 16000, 14875, },
  3604. { 16125, 15000, },
  3605. };
  3606. if (INTEL_INFO(dev)->is_mobile)
  3607. return v_table[pxvid].vm;
  3608. else
  3609. return v_table[pxvid].vd;
  3610. }
  3611. static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
  3612. {
  3613. struct timespec now, diff1;
  3614. u64 diff;
  3615. unsigned long diffms;
  3616. u32 count;
  3617. assert_spin_locked(&mchdev_lock);
  3618. getrawmonotonic(&now);
  3619. diff1 = timespec_sub(now, dev_priv->ips.last_time2);
  3620. /* Don't divide by 0 */
  3621. diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
  3622. if (!diffms)
  3623. return;
  3624. count = I915_READ(GFXEC);
  3625. if (count < dev_priv->ips.last_count2) {
  3626. diff = ~0UL - dev_priv->ips.last_count2;
  3627. diff += count;
  3628. } else {
  3629. diff = count - dev_priv->ips.last_count2;
  3630. }
  3631. dev_priv->ips.last_count2 = count;
  3632. dev_priv->ips.last_time2 = now;
  3633. /* More magic constants... */
  3634. diff = diff * 1181;
  3635. diff = div_u64(diff, diffms * 10);
  3636. dev_priv->ips.gfx_power = diff;
  3637. }
  3638. void i915_update_gfx_val(struct drm_i915_private *dev_priv)
  3639. {
  3640. struct drm_device *dev = dev_priv->dev;
  3641. if (INTEL_INFO(dev)->gen != 5)
  3642. return;
  3643. spin_lock_irq(&mchdev_lock);
  3644. __i915_update_gfx_val(dev_priv);
  3645. spin_unlock_irq(&mchdev_lock);
  3646. }
  3647. static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
  3648. {
  3649. unsigned long t, corr, state1, corr2, state2;
  3650. u32 pxvid, ext_v;
  3651. assert_spin_locked(&mchdev_lock);
  3652. pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
  3653. pxvid = (pxvid >> 24) & 0x7f;
  3654. ext_v = pvid_to_extvid(dev_priv, pxvid);
  3655. state1 = ext_v;
  3656. t = i915_mch_val(dev_priv);
  3657. /* Revel in the empirically derived constants */
  3658. /* Correction factor in 1/100000 units */
  3659. if (t > 80)
  3660. corr = ((t * 2349) + 135940);
  3661. else if (t >= 50)
  3662. corr = ((t * 964) + 29317);
  3663. else /* < 50 */
  3664. corr = ((t * 301) + 1004);
  3665. corr = corr * ((150142 * state1) / 10000 - 78642);
  3666. corr /= 100000;
  3667. corr2 = (corr * dev_priv->ips.corr);
  3668. state2 = (corr2 * state1) / 10000;
  3669. state2 /= 100; /* convert to mW */
  3670. __i915_update_gfx_val(dev_priv);
  3671. return dev_priv->ips.gfx_power + state2;
  3672. }
  3673. unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
  3674. {
  3675. struct drm_device *dev = dev_priv->dev;
  3676. unsigned long val;
  3677. if (INTEL_INFO(dev)->gen != 5)
  3678. return 0;
  3679. spin_lock_irq(&mchdev_lock);
  3680. val = __i915_gfx_val(dev_priv);
  3681. spin_unlock_irq(&mchdev_lock);
  3682. return val;
  3683. }
  3684. /**
  3685. * i915_read_mch_val - return value for IPS use
  3686. *
  3687. * Calculate and return a value for the IPS driver to use when deciding whether
  3688. * we have thermal and power headroom to increase CPU or GPU power budget.
  3689. */
  3690. unsigned long i915_read_mch_val(void)
  3691. {
  3692. struct drm_i915_private *dev_priv;
  3693. unsigned long chipset_val, graphics_val, ret = 0;
  3694. spin_lock_irq(&mchdev_lock);
  3695. if (!i915_mch_dev)
  3696. goto out_unlock;
  3697. dev_priv = i915_mch_dev;
  3698. chipset_val = __i915_chipset_val(dev_priv);
  3699. graphics_val = __i915_gfx_val(dev_priv);
  3700. ret = chipset_val + graphics_val;
  3701. out_unlock:
  3702. spin_unlock_irq(&mchdev_lock);
  3703. return ret;
  3704. }
  3705. EXPORT_SYMBOL_GPL(i915_read_mch_val);
  3706. /**
  3707. * i915_gpu_raise - raise GPU frequency limit
  3708. *
  3709. * Raise the limit; IPS indicates we have thermal headroom.
  3710. */
  3711. bool i915_gpu_raise(void)
  3712. {
  3713. struct drm_i915_private *dev_priv;
  3714. bool ret = true;
  3715. spin_lock_irq(&mchdev_lock);
  3716. if (!i915_mch_dev) {
  3717. ret = false;
  3718. goto out_unlock;
  3719. }
  3720. dev_priv = i915_mch_dev;
  3721. if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
  3722. dev_priv->ips.max_delay--;
  3723. out_unlock:
  3724. spin_unlock_irq(&mchdev_lock);
  3725. return ret;
  3726. }
  3727. EXPORT_SYMBOL_GPL(i915_gpu_raise);
  3728. /**
  3729. * i915_gpu_lower - lower GPU frequency limit
  3730. *
  3731. * IPS indicates we're close to a thermal limit, so throttle back the GPU
  3732. * frequency maximum.
  3733. */
  3734. bool i915_gpu_lower(void)
  3735. {
  3736. struct drm_i915_private *dev_priv;
  3737. bool ret = true;
  3738. spin_lock_irq(&mchdev_lock);
  3739. if (!i915_mch_dev) {
  3740. ret = false;
  3741. goto out_unlock;
  3742. }
  3743. dev_priv = i915_mch_dev;
  3744. if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
  3745. dev_priv->ips.max_delay++;
  3746. out_unlock:
  3747. spin_unlock_irq(&mchdev_lock);
  3748. return ret;
  3749. }
  3750. EXPORT_SYMBOL_GPL(i915_gpu_lower);
  3751. /**
  3752. * i915_gpu_busy - indicate GPU business to IPS
  3753. *
  3754. * Tell the IPS driver whether or not the GPU is busy.
  3755. */
  3756. bool i915_gpu_busy(void)
  3757. {
  3758. struct drm_i915_private *dev_priv;
  3759. struct intel_engine_cs *ring;
  3760. bool ret = false;
  3761. int i;
  3762. spin_lock_irq(&mchdev_lock);
  3763. if (!i915_mch_dev)
  3764. goto out_unlock;
  3765. dev_priv = i915_mch_dev;
  3766. for_each_ring(ring, dev_priv, i)
  3767. ret |= !list_empty(&ring->request_list);
  3768. out_unlock:
  3769. spin_unlock_irq(&mchdev_lock);
  3770. return ret;
  3771. }
  3772. EXPORT_SYMBOL_GPL(i915_gpu_busy);
  3773. /**
  3774. * i915_gpu_turbo_disable - disable graphics turbo
  3775. *
  3776. * Disable graphics turbo by resetting the max frequency and setting the
  3777. * current frequency to the default.
  3778. */
  3779. bool i915_gpu_turbo_disable(void)
  3780. {
  3781. struct drm_i915_private *dev_priv;
  3782. bool ret = true;
  3783. spin_lock_irq(&mchdev_lock);
  3784. if (!i915_mch_dev) {
  3785. ret = false;
  3786. goto out_unlock;
  3787. }
  3788. dev_priv = i915_mch_dev;
  3789. dev_priv->ips.max_delay = dev_priv->ips.fstart;
  3790. if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
  3791. ret = false;
  3792. out_unlock:
  3793. spin_unlock_irq(&mchdev_lock);
  3794. return ret;
  3795. }
  3796. EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
  3797. /**
  3798. * Tells the intel_ips driver that the i915 driver is now loaded, if
  3799. * IPS got loaded first.
  3800. *
  3801. * This awkward dance is so that neither module has to depend on the
  3802. * other in order for IPS to do the appropriate communication of
  3803. * GPU turbo limits to i915.
  3804. */
  3805. static void
  3806. ips_ping_for_i915_load(void)
  3807. {
  3808. void (*link)(void);
  3809. link = symbol_get(ips_link_to_i915_driver);
  3810. if (link) {
  3811. link();
  3812. symbol_put(ips_link_to_i915_driver);
  3813. }
  3814. }
  3815. void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
  3816. {
  3817. /* We only register the i915 ips part with intel-ips once everything is
  3818. * set up, to avoid intel-ips sneaking in and reading bogus values. */
  3819. spin_lock_irq(&mchdev_lock);
  3820. i915_mch_dev = dev_priv;
  3821. spin_unlock_irq(&mchdev_lock);
  3822. ips_ping_for_i915_load();
  3823. }
  3824. void intel_gpu_ips_teardown(void)
  3825. {
  3826. spin_lock_irq(&mchdev_lock);
  3827. i915_mch_dev = NULL;
  3828. spin_unlock_irq(&mchdev_lock);
  3829. }
  3830. static void intel_init_emon(struct drm_device *dev)
  3831. {
  3832. struct drm_i915_private *dev_priv = dev->dev_private;
  3833. u32 lcfuse;
  3834. u8 pxw[16];
  3835. int i;
  3836. /* Disable to program */
  3837. I915_WRITE(ECR, 0);
  3838. POSTING_READ(ECR);
  3839. /* Program energy weights for various events */
  3840. I915_WRITE(SDEW, 0x15040d00);
  3841. I915_WRITE(CSIEW0, 0x007f0000);
  3842. I915_WRITE(CSIEW1, 0x1e220004);
  3843. I915_WRITE(CSIEW2, 0x04000004);
  3844. for (i = 0; i < 5; i++)
  3845. I915_WRITE(PEW + (i * 4), 0);
  3846. for (i = 0; i < 3; i++)
  3847. I915_WRITE(DEW + (i * 4), 0);
  3848. /* Program P-state weights to account for frequency power adjustment */
  3849. for (i = 0; i < 16; i++) {
  3850. u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
  3851. unsigned long freq = intel_pxfreq(pxvidfreq);
  3852. unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
  3853. PXVFREQ_PX_SHIFT;
  3854. unsigned long val;
  3855. val = vid * vid;
  3856. val *= (freq / 1000);
  3857. val *= 255;
  3858. val /= (127*127*900);
  3859. if (val > 0xff)
  3860. DRM_ERROR("bad pxval: %ld\n", val);
  3861. pxw[i] = val;
  3862. }
  3863. /* Render standby states get 0 weight */
  3864. pxw[14] = 0;
  3865. pxw[15] = 0;
  3866. for (i = 0; i < 4; i++) {
  3867. u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
  3868. (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
  3869. I915_WRITE(PXW + (i * 4), val);
  3870. }
  3871. /* Adjust magic regs to magic values (more experimental results) */
  3872. I915_WRITE(OGW0, 0);
  3873. I915_WRITE(OGW1, 0);
  3874. I915_WRITE(EG0, 0x00007f00);
  3875. I915_WRITE(EG1, 0x0000000e);
  3876. I915_WRITE(EG2, 0x000e0000);
  3877. I915_WRITE(EG3, 0x68000300);
  3878. I915_WRITE(EG4, 0x42000000);
  3879. I915_WRITE(EG5, 0x00140031);
  3880. I915_WRITE(EG6, 0);
  3881. I915_WRITE(EG7, 0);
  3882. for (i = 0; i < 8; i++)
  3883. I915_WRITE(PXWL + (i * 4), 0);
  3884. /* Enable PMON + select events */
  3885. I915_WRITE(ECR, 0x80000019);
  3886. lcfuse = I915_READ(LCFUSE02);
  3887. dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
  3888. }
  3889. void intel_init_gt_powersave(struct drm_device *dev)
  3890. {
  3891. i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
  3892. if (IS_VALLEYVIEW(dev))
  3893. valleyview_init_gt_powersave(dev);
  3894. }
  3895. void intel_cleanup_gt_powersave(struct drm_device *dev)
  3896. {
  3897. if (IS_VALLEYVIEW(dev))
  3898. valleyview_cleanup_gt_powersave(dev);
  3899. }
  3900. void intel_disable_gt_powersave(struct drm_device *dev)
  3901. {
  3902. struct drm_i915_private *dev_priv = dev->dev_private;
  3903. /* Interrupts should be disabled already to avoid re-arming. */
  3904. WARN_ON(dev->irq_enabled);
  3905. if (IS_IRONLAKE_M(dev)) {
  3906. ironlake_disable_drps(dev);
  3907. ironlake_disable_rc6(dev);
  3908. } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
  3909. if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
  3910. intel_runtime_pm_put(dev_priv);
  3911. cancel_work_sync(&dev_priv->rps.work);
  3912. mutex_lock(&dev_priv->rps.hw_lock);
  3913. if (IS_VALLEYVIEW(dev))
  3914. valleyview_disable_rps(dev);
  3915. else
  3916. gen6_disable_rps(dev);
  3917. dev_priv->rps.enabled = false;
  3918. mutex_unlock(&dev_priv->rps.hw_lock);
  3919. }
  3920. }
  3921. static void intel_gen6_powersave_work(struct work_struct *work)
  3922. {
  3923. struct drm_i915_private *dev_priv =
  3924. container_of(work, struct drm_i915_private,
  3925. rps.delayed_resume_work.work);
  3926. struct drm_device *dev = dev_priv->dev;
  3927. mutex_lock(&dev_priv->rps.hw_lock);
  3928. if (IS_VALLEYVIEW(dev)) {
  3929. valleyview_enable_rps(dev);
  3930. } else if (IS_BROADWELL(dev)) {
  3931. gen8_enable_rps(dev);
  3932. __gen6_update_ring_freq(dev);
  3933. } else {
  3934. gen6_enable_rps(dev);
  3935. __gen6_update_ring_freq(dev);
  3936. }
  3937. dev_priv->rps.enabled = true;
  3938. mutex_unlock(&dev_priv->rps.hw_lock);
  3939. intel_runtime_pm_put(dev_priv);
  3940. }
  3941. void intel_enable_gt_powersave(struct drm_device *dev)
  3942. {
  3943. struct drm_i915_private *dev_priv = dev->dev_private;
  3944. if (IS_IRONLAKE_M(dev)) {
  3945. mutex_lock(&dev->struct_mutex);
  3946. ironlake_enable_drps(dev);
  3947. ironlake_enable_rc6(dev);
  3948. intel_init_emon(dev);
  3949. mutex_unlock(&dev->struct_mutex);
  3950. } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
  3951. /*
  3952. * PCU communication is slow and this doesn't need to be
  3953. * done at any specific time, so do this out of our fast path
  3954. * to make resume and init faster.
  3955. *
  3956. * We depend on the HW RC6 power context save/restore
  3957. * mechanism when entering D3 through runtime PM suspend. So
  3958. * disable RPM until RPS/RC6 is properly setup. We can only
  3959. * get here via the driver load/system resume/runtime resume
  3960. * paths, so the _noresume version is enough (and in case of
  3961. * runtime resume it's necessary).
  3962. */
  3963. if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
  3964. round_jiffies_up_relative(HZ)))
  3965. intel_runtime_pm_get_noresume(dev_priv);
  3966. }
  3967. }
  3968. void intel_reset_gt_powersave(struct drm_device *dev)
  3969. {
  3970. struct drm_i915_private *dev_priv = dev->dev_private;
  3971. dev_priv->rps.enabled = false;
  3972. intel_enable_gt_powersave(dev);
  3973. }
  3974. static void ibx_init_clock_gating(struct drm_device *dev)
  3975. {
  3976. struct drm_i915_private *dev_priv = dev->dev_private;
  3977. /*
  3978. * On Ibex Peak and Cougar Point, we need to disable clock
  3979. * gating for the panel power sequencer or it will fail to
  3980. * start up when no ports are active.
  3981. */
  3982. I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
  3983. }
  3984. static void g4x_disable_trickle_feed(struct drm_device *dev)
  3985. {
  3986. struct drm_i915_private *dev_priv = dev->dev_private;
  3987. int pipe;
  3988. for_each_pipe(pipe) {
  3989. I915_WRITE(DSPCNTR(pipe),
  3990. I915_READ(DSPCNTR(pipe)) |
  3991. DISPPLANE_TRICKLE_FEED_DISABLE);
  3992. intel_flush_primary_plane(dev_priv, pipe);
  3993. }
  3994. }
  3995. static void ilk_init_lp_watermarks(struct drm_device *dev)
  3996. {
  3997. struct drm_i915_private *dev_priv = dev->dev_private;
  3998. I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
  3999. I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
  4000. I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
  4001. /*
  4002. * Don't touch WM1S_LP_EN here.
  4003. * Doing so could cause underruns.
  4004. */
  4005. }
  4006. static void ironlake_init_clock_gating(struct drm_device *dev)
  4007. {
  4008. struct drm_i915_private *dev_priv = dev->dev_private;
  4009. uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
  4010. /*
  4011. * Required for FBC
  4012. * WaFbcDisableDpfcClockGating:ilk
  4013. */
  4014. dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
  4015. ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
  4016. ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
  4017. I915_WRITE(PCH_3DCGDIS0,
  4018. MARIUNIT_CLOCK_GATE_DISABLE |
  4019. SVSMUNIT_CLOCK_GATE_DISABLE);
  4020. I915_WRITE(PCH_3DCGDIS1,
  4021. VFMUNIT_CLOCK_GATE_DISABLE);
  4022. /*
  4023. * According to the spec the following bits should be set in
  4024. * order to enable memory self-refresh
  4025. * The bit 22/21 of 0x42004
  4026. * The bit 5 of 0x42020
  4027. * The bit 15 of 0x45000
  4028. */
  4029. I915_WRITE(ILK_DISPLAY_CHICKEN2,
  4030. (I915_READ(ILK_DISPLAY_CHICKEN2) |
  4031. ILK_DPARB_GATE | ILK_VSDPFD_FULL));
  4032. dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
  4033. I915_WRITE(DISP_ARB_CTL,
  4034. (I915_READ(DISP_ARB_CTL) |
  4035. DISP_FBC_WM_DIS));
  4036. ilk_init_lp_watermarks(dev);
  4037. /*
  4038. * Based on the document from hardware guys the following bits
  4039. * should be set unconditionally in order to enable FBC.
  4040. * The bit 22 of 0x42000
  4041. * The bit 22 of 0x42004
  4042. * The bit 7,8,9 of 0x42020.
  4043. */
  4044. if (IS_IRONLAKE_M(dev)) {
  4045. /* WaFbcAsynchFlipDisableFbcQueue:ilk */
  4046. I915_WRITE(ILK_DISPLAY_CHICKEN1,
  4047. I915_READ(ILK_DISPLAY_CHICKEN1) |
  4048. ILK_FBCQ_DIS);
  4049. I915_WRITE(ILK_DISPLAY_CHICKEN2,
  4050. I915_READ(ILK_DISPLAY_CHICKEN2) |
  4051. ILK_DPARB_GATE);
  4052. }
  4053. I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
  4054. I915_WRITE(ILK_DISPLAY_CHICKEN2,
  4055. I915_READ(ILK_DISPLAY_CHICKEN2) |
  4056. ILK_ELPIN_409_SELECT);
  4057. I915_WRITE(_3D_CHICKEN2,
  4058. _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
  4059. _3D_CHICKEN2_WM_READ_PIPELINED);
  4060. /* WaDisableRenderCachePipelinedFlush:ilk */
  4061. I915_WRITE(CACHE_MODE_0,
  4062. _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
  4063. /* WaDisable_RenderCache_OperationalFlush:ilk */
  4064. I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4065. g4x_disable_trickle_feed(dev);
  4066. ibx_init_clock_gating(dev);
  4067. }
  4068. static void cpt_init_clock_gating(struct drm_device *dev)
  4069. {
  4070. struct drm_i915_private *dev_priv = dev->dev_private;
  4071. int pipe;
  4072. uint32_t val;
  4073. /*
  4074. * On Ibex Peak and Cougar Point, we need to disable clock
  4075. * gating for the panel power sequencer or it will fail to
  4076. * start up when no ports are active.
  4077. */
  4078. I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
  4079. PCH_DPLUNIT_CLOCK_GATE_DISABLE |
  4080. PCH_CPUNIT_CLOCK_GATE_DISABLE);
  4081. I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
  4082. DPLS_EDP_PPS_FIX_DIS);
  4083. /* The below fixes the weird display corruption, a few pixels shifted
  4084. * downward, on (only) LVDS of some HP laptops with IVY.
  4085. */
  4086. for_each_pipe(pipe) {
  4087. val = I915_READ(TRANS_CHICKEN2(pipe));
  4088. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  4089. val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
  4090. if (dev_priv->vbt.fdi_rx_polarity_inverted)
  4091. val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
  4092. val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
  4093. val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
  4094. val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
  4095. I915_WRITE(TRANS_CHICKEN2(pipe), val);
  4096. }
  4097. /* WADP0ClockGatingDisable */
  4098. for_each_pipe(pipe) {
  4099. I915_WRITE(TRANS_CHICKEN1(pipe),
  4100. TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
  4101. }
  4102. }
  4103. static void gen6_check_mch_setup(struct drm_device *dev)
  4104. {
  4105. struct drm_i915_private *dev_priv = dev->dev_private;
  4106. uint32_t tmp;
  4107. tmp = I915_READ(MCH_SSKPD);
  4108. if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
  4109. DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
  4110. DRM_INFO("This can cause pipe underruns and display issues.\n");
  4111. DRM_INFO("Please upgrade your BIOS to fix this.\n");
  4112. }
  4113. }
  4114. static void gen6_init_clock_gating(struct drm_device *dev)
  4115. {
  4116. struct drm_i915_private *dev_priv = dev->dev_private;
  4117. uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
  4118. I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
  4119. I915_WRITE(ILK_DISPLAY_CHICKEN2,
  4120. I915_READ(ILK_DISPLAY_CHICKEN2) |
  4121. ILK_ELPIN_409_SELECT);
  4122. /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
  4123. I915_WRITE(_3D_CHICKEN,
  4124. _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
  4125. /* WaSetupGtModeTdRowDispatch:snb */
  4126. if (IS_SNB_GT1(dev))
  4127. I915_WRITE(GEN6_GT_MODE,
  4128. _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
  4129. /* WaDisable_RenderCache_OperationalFlush:snb */
  4130. I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4131. /*
  4132. * BSpec recoomends 8x4 when MSAA is used,
  4133. * however in practice 16x4 seems fastest.
  4134. *
  4135. * Note that PS/WM thread counts depend on the WIZ hashing
  4136. * disable bit, which we don't touch here, but it's good
  4137. * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  4138. */
  4139. I915_WRITE(GEN6_GT_MODE,
  4140. GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
  4141. ilk_init_lp_watermarks(dev);
  4142. I915_WRITE(CACHE_MODE_0,
  4143. _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
  4144. I915_WRITE(GEN6_UCGCTL1,
  4145. I915_READ(GEN6_UCGCTL1) |
  4146. GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
  4147. GEN6_CSUNIT_CLOCK_GATE_DISABLE);
  4148. /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
  4149. * gating disable must be set. Failure to set it results in
  4150. * flickering pixels due to Z write ordering failures after
  4151. * some amount of runtime in the Mesa "fire" demo, and Unigine
  4152. * Sanctuary and Tropics, and apparently anything else with
  4153. * alpha test or pixel discard.
  4154. *
  4155. * According to the spec, bit 11 (RCCUNIT) must also be set,
  4156. * but we didn't debug actual testcases to find it out.
  4157. *
  4158. * WaDisableRCCUnitClockGating:snb
  4159. * WaDisableRCPBUnitClockGating:snb
  4160. */
  4161. I915_WRITE(GEN6_UCGCTL2,
  4162. GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
  4163. GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
  4164. /* WaStripsFansDisableFastClipPerformanceFix:snb */
  4165. I915_WRITE(_3D_CHICKEN3,
  4166. _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
  4167. /*
  4168. * Bspec says:
  4169. * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
  4170. * 3DSTATE_SF number of SF output attributes is more than 16."
  4171. */
  4172. I915_WRITE(_3D_CHICKEN3,
  4173. _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
  4174. /*
  4175. * According to the spec the following bits should be
  4176. * set in order to enable memory self-refresh and fbc:
  4177. * The bit21 and bit22 of 0x42000
  4178. * The bit21 and bit22 of 0x42004
  4179. * The bit5 and bit7 of 0x42020
  4180. * The bit14 of 0x70180
  4181. * The bit14 of 0x71180
  4182. *
  4183. * WaFbcAsynchFlipDisableFbcQueue:snb
  4184. */
  4185. I915_WRITE(ILK_DISPLAY_CHICKEN1,
  4186. I915_READ(ILK_DISPLAY_CHICKEN1) |
  4187. ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
  4188. I915_WRITE(ILK_DISPLAY_CHICKEN2,
  4189. I915_READ(ILK_DISPLAY_CHICKEN2) |
  4190. ILK_DPARB_GATE | ILK_VSDPFD_FULL);
  4191. I915_WRITE(ILK_DSPCLK_GATE_D,
  4192. I915_READ(ILK_DSPCLK_GATE_D) |
  4193. ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
  4194. ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
  4195. g4x_disable_trickle_feed(dev);
  4196. cpt_init_clock_gating(dev);
  4197. gen6_check_mch_setup(dev);
  4198. }
  4199. static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
  4200. {
  4201. uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
  4202. /*
  4203. * WaVSThreadDispatchOverride:ivb,vlv
  4204. *
  4205. * This actually overrides the dispatch
  4206. * mode for all thread types.
  4207. */
  4208. reg &= ~GEN7_FF_SCHED_MASK;
  4209. reg |= GEN7_FF_TS_SCHED_HW;
  4210. reg |= GEN7_FF_VS_SCHED_HW;
  4211. reg |= GEN7_FF_DS_SCHED_HW;
  4212. I915_WRITE(GEN7_FF_THREAD_MODE, reg);
  4213. }
  4214. static void lpt_init_clock_gating(struct drm_device *dev)
  4215. {
  4216. struct drm_i915_private *dev_priv = dev->dev_private;
  4217. /*
  4218. * TODO: this bit should only be enabled when really needed, then
  4219. * disabled when not needed anymore in order to save power.
  4220. */
  4221. if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
  4222. I915_WRITE(SOUTH_DSPCLK_GATE_D,
  4223. I915_READ(SOUTH_DSPCLK_GATE_D) |
  4224. PCH_LP_PARTITION_LEVEL_DISABLE);
  4225. /* WADPOClockGatingDisable:hsw */
  4226. I915_WRITE(_TRANSA_CHICKEN1,
  4227. I915_READ(_TRANSA_CHICKEN1) |
  4228. TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
  4229. }
  4230. static void lpt_suspend_hw(struct drm_device *dev)
  4231. {
  4232. struct drm_i915_private *dev_priv = dev->dev_private;
  4233. if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
  4234. uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
  4235. val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
  4236. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  4237. }
  4238. }
  4239. static void gen8_init_clock_gating(struct drm_device *dev)
  4240. {
  4241. struct drm_i915_private *dev_priv = dev->dev_private;
  4242. enum pipe pipe;
  4243. I915_WRITE(WM3_LP_ILK, 0);
  4244. I915_WRITE(WM2_LP_ILK, 0);
  4245. I915_WRITE(WM1_LP_ILK, 0);
  4246. /* FIXME(BDW): Check all the w/a, some might only apply to
  4247. * pre-production hw. */
  4248. /* WaDisablePartialInstShootdown:bdw */
  4249. I915_WRITE(GEN8_ROW_CHICKEN,
  4250. _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
  4251. /* WaDisableThreadStallDopClockGating:bdw */
  4252. /* FIXME: Unclear whether we really need this on production bdw. */
  4253. I915_WRITE(GEN8_ROW_CHICKEN,
  4254. _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
  4255. /*
  4256. * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
  4257. * pre-production hardware
  4258. */
  4259. I915_WRITE(HALF_SLICE_CHICKEN3,
  4260. _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
  4261. I915_WRITE(HALF_SLICE_CHICKEN3,
  4262. _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
  4263. I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
  4264. I915_WRITE(_3D_CHICKEN3,
  4265. _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
  4266. I915_WRITE(COMMON_SLICE_CHICKEN2,
  4267. _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
  4268. I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
  4269. _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
  4270. /* WaDisableDopClockGating:bdw May not be needed for production */
  4271. I915_WRITE(GEN7_ROW_CHICKEN2,
  4272. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4273. /* WaSwitchSolVfFArbitrationPriority:bdw */
  4274. I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
  4275. /* WaPsrDPAMaskVBlankInSRD:bdw */
  4276. I915_WRITE(CHICKEN_PAR1_1,
  4277. I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
  4278. /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
  4279. for_each_pipe(pipe) {
  4280. I915_WRITE(CHICKEN_PIPESL_1(pipe),
  4281. I915_READ(CHICKEN_PIPESL_1(pipe)) |
  4282. BDW_DPRS_MASK_VBLANK_SRD);
  4283. }
  4284. /* Use Force Non-Coherent whenever executing a 3D context. This is a
  4285. * workaround for for a possible hang in the unlikely event a TLB
  4286. * invalidation occurs during a PSD flush.
  4287. */
  4288. I915_WRITE(HDC_CHICKEN0,
  4289. I915_READ(HDC_CHICKEN0) |
  4290. _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
  4291. /* WaVSRefCountFullforceMissDisable:bdw */
  4292. /* WaDSRefCountFullforceMissDisable:bdw */
  4293. I915_WRITE(GEN7_FF_THREAD_MODE,
  4294. I915_READ(GEN7_FF_THREAD_MODE) &
  4295. ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
  4296. /*
  4297. * BSpec recommends 8x4 when MSAA is used,
  4298. * however in practice 16x4 seems fastest.
  4299. *
  4300. * Note that PS/WM thread counts depend on the WIZ hashing
  4301. * disable bit, which we don't touch here, but it's good
  4302. * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  4303. */
  4304. I915_WRITE(GEN7_GT_MODE,
  4305. GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
  4306. I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
  4307. _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
  4308. /* WaDisableSDEUnitClockGating:bdw */
  4309. I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
  4310. GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
  4311. /* Wa4x4STCOptimizationDisable:bdw */
  4312. I915_WRITE(CACHE_MODE_1,
  4313. _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
  4314. }
  4315. static void haswell_init_clock_gating(struct drm_device *dev)
  4316. {
  4317. struct drm_i915_private *dev_priv = dev->dev_private;
  4318. ilk_init_lp_watermarks(dev);
  4319. /* L3 caching of data atomics doesn't work -- disable it. */
  4320. I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
  4321. I915_WRITE(HSW_ROW_CHICKEN3,
  4322. _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
  4323. /* This is required by WaCatErrorRejectionIssue:hsw */
  4324. I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
  4325. I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
  4326. GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
  4327. /* WaVSRefCountFullforceMissDisable:hsw */
  4328. I915_WRITE(GEN7_FF_THREAD_MODE,
  4329. I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
  4330. /* WaDisable_RenderCache_OperationalFlush:hsw */
  4331. I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4332. /* enable HiZ Raw Stall Optimization */
  4333. I915_WRITE(CACHE_MODE_0_GEN7,
  4334. _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
  4335. /* WaDisable4x2SubspanOptimization:hsw */
  4336. I915_WRITE(CACHE_MODE_1,
  4337. _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  4338. /*
  4339. * BSpec recommends 8x4 when MSAA is used,
  4340. * however in practice 16x4 seems fastest.
  4341. *
  4342. * Note that PS/WM thread counts depend on the WIZ hashing
  4343. * disable bit, which we don't touch here, but it's good
  4344. * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  4345. */
  4346. I915_WRITE(GEN7_GT_MODE,
  4347. GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
  4348. /* WaSwitchSolVfFArbitrationPriority:hsw */
  4349. I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
  4350. /* WaRsPkgCStateDisplayPMReq:hsw */
  4351. I915_WRITE(CHICKEN_PAR1_1,
  4352. I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
  4353. lpt_init_clock_gating(dev);
  4354. }
  4355. static void ivybridge_init_clock_gating(struct drm_device *dev)
  4356. {
  4357. struct drm_i915_private *dev_priv = dev->dev_private;
  4358. uint32_t snpcr;
  4359. ilk_init_lp_watermarks(dev);
  4360. I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
  4361. /* WaDisableEarlyCull:ivb */
  4362. I915_WRITE(_3D_CHICKEN3,
  4363. _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
  4364. /* WaDisableBackToBackFlipFix:ivb */
  4365. I915_WRITE(IVB_CHICKEN3,
  4366. CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
  4367. CHICKEN3_DGMG_DONE_FIX_DISABLE);
  4368. /* WaDisablePSDDualDispatchEnable:ivb */
  4369. if (IS_IVB_GT1(dev))
  4370. I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
  4371. _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
  4372. /* WaDisable_RenderCache_OperationalFlush:ivb */
  4373. I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4374. /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
  4375. I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
  4376. GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
  4377. /* WaApplyL3ControlAndL3ChickenMode:ivb */
  4378. I915_WRITE(GEN7_L3CNTLREG1,
  4379. GEN7_WA_FOR_GEN7_L3_CONTROL);
  4380. I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
  4381. GEN7_WA_L3_CHICKEN_MODE);
  4382. if (IS_IVB_GT1(dev))
  4383. I915_WRITE(GEN7_ROW_CHICKEN2,
  4384. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4385. else {
  4386. /* must write both registers */
  4387. I915_WRITE(GEN7_ROW_CHICKEN2,
  4388. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4389. I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
  4390. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4391. }
  4392. /* WaForceL3Serialization:ivb */
  4393. I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
  4394. ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
  4395. /*
  4396. * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
  4397. * This implements the WaDisableRCZUnitClockGating:ivb workaround.
  4398. */
  4399. I915_WRITE(GEN6_UCGCTL2,
  4400. GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  4401. /* This is required by WaCatErrorRejectionIssue:ivb */
  4402. I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
  4403. I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
  4404. GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
  4405. g4x_disable_trickle_feed(dev);
  4406. gen7_setup_fixed_func_scheduler(dev_priv);
  4407. if (0) { /* causes HiZ corruption on ivb:gt1 */
  4408. /* enable HiZ Raw Stall Optimization */
  4409. I915_WRITE(CACHE_MODE_0_GEN7,
  4410. _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
  4411. }
  4412. /* WaDisable4x2SubspanOptimization:ivb */
  4413. I915_WRITE(CACHE_MODE_1,
  4414. _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  4415. /*
  4416. * BSpec recommends 8x4 when MSAA is used,
  4417. * however in practice 16x4 seems fastest.
  4418. *
  4419. * Note that PS/WM thread counts depend on the WIZ hashing
  4420. * disable bit, which we don't touch here, but it's good
  4421. * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
  4422. */
  4423. I915_WRITE(GEN7_GT_MODE,
  4424. GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
  4425. snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
  4426. snpcr &= ~GEN6_MBC_SNPCR_MASK;
  4427. snpcr |= GEN6_MBC_SNPCR_MED;
  4428. I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
  4429. if (!HAS_PCH_NOP(dev))
  4430. cpt_init_clock_gating(dev);
  4431. gen6_check_mch_setup(dev);
  4432. }
  4433. static void valleyview_init_clock_gating(struct drm_device *dev)
  4434. {
  4435. struct drm_i915_private *dev_priv = dev->dev_private;
  4436. u32 val;
  4437. mutex_lock(&dev_priv->rps.hw_lock);
  4438. val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
  4439. mutex_unlock(&dev_priv->rps.hw_lock);
  4440. switch ((val >> 6) & 3) {
  4441. case 0:
  4442. case 1:
  4443. dev_priv->mem_freq = 800;
  4444. break;
  4445. case 2:
  4446. dev_priv->mem_freq = 1066;
  4447. break;
  4448. case 3:
  4449. dev_priv->mem_freq = 1333;
  4450. break;
  4451. }
  4452. DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  4453. dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
  4454. DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
  4455. dev_priv->vlv_cdclk_freq);
  4456. I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  4457. /* WaDisableEarlyCull:vlv */
  4458. I915_WRITE(_3D_CHICKEN3,
  4459. _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
  4460. /* WaDisableBackToBackFlipFix:vlv */
  4461. I915_WRITE(IVB_CHICKEN3,
  4462. CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
  4463. CHICKEN3_DGMG_DONE_FIX_DISABLE);
  4464. /* WaPsdDispatchEnable:vlv */
  4465. /* WaDisablePSDDualDispatchEnable:vlv */
  4466. I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
  4467. _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
  4468. GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
  4469. /* WaDisable_RenderCache_OperationalFlush:vlv */
  4470. I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4471. /* WaForceL3Serialization:vlv */
  4472. I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
  4473. ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
  4474. /* WaDisableDopClockGating:vlv */
  4475. I915_WRITE(GEN7_ROW_CHICKEN2,
  4476. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4477. /* This is required by WaCatErrorRejectionIssue:vlv */
  4478. I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
  4479. I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
  4480. GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
  4481. gen7_setup_fixed_func_scheduler(dev_priv);
  4482. /*
  4483. * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
  4484. * This implements the WaDisableRCZUnitClockGating:vlv workaround.
  4485. */
  4486. I915_WRITE(GEN6_UCGCTL2,
  4487. GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  4488. /* WaDisableL3Bank2xClockGate:vlv
  4489. * Disabling L3 clock gating- MMIO 940c[25] = 1
  4490. * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
  4491. I915_WRITE(GEN7_UCGCTL4,
  4492. I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
  4493. I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
  4494. /*
  4495. * BSpec says this must be set, even though
  4496. * WaDisable4x2SubspanOptimization isn't listed for VLV.
  4497. */
  4498. I915_WRITE(CACHE_MODE_1,
  4499. _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  4500. /*
  4501. * WaIncreaseL3CreditsForVLVB0:vlv
  4502. * This is the hardware default actually.
  4503. */
  4504. I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
  4505. /*
  4506. * WaDisableVLVClockGating_VBIIssue:vlv
  4507. * Disable clock gating on th GCFG unit to prevent a delay
  4508. * in the reporting of vblank events.
  4509. */
  4510. I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
  4511. }
  4512. static void cherryview_init_clock_gating(struct drm_device *dev)
  4513. {
  4514. struct drm_i915_private *dev_priv = dev->dev_private;
  4515. I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  4516. I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
  4517. /* WaDisablePartialInstShootdown:chv */
  4518. I915_WRITE(GEN8_ROW_CHICKEN,
  4519. _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
  4520. /* WaDisableThreadStallDopClockGating:chv */
  4521. I915_WRITE(GEN8_ROW_CHICKEN,
  4522. _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
  4523. /* WaVSRefCountFullforceMissDisable:chv */
  4524. /* WaDSRefCountFullforceMissDisable:chv */
  4525. I915_WRITE(GEN7_FF_THREAD_MODE,
  4526. I915_READ(GEN7_FF_THREAD_MODE) &
  4527. ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
  4528. /* WaDisableSemaphoreAndSyncFlipWait:chv */
  4529. I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
  4530. _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
  4531. /* WaDisableCSUnitClockGating:chv */
  4532. I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
  4533. GEN6_CSUNIT_CLOCK_GATE_DISABLE);
  4534. /* WaDisableSDEUnitClockGating:chv */
  4535. I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
  4536. GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
  4537. /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
  4538. I915_WRITE(HALF_SLICE_CHICKEN3,
  4539. _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
  4540. /* WaDisableGunitClockGating:chv (pre-production hw) */
  4541. I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
  4542. GINT_DIS);
  4543. /* WaDisableFfDopClockGating:chv (pre-production hw) */
  4544. I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
  4545. _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
  4546. /* WaDisableDopClockGating:chv (pre-production hw) */
  4547. I915_WRITE(GEN7_ROW_CHICKEN2,
  4548. _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
  4549. I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
  4550. GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
  4551. }
  4552. static void g4x_init_clock_gating(struct drm_device *dev)
  4553. {
  4554. struct drm_i915_private *dev_priv = dev->dev_private;
  4555. uint32_t dspclk_gate;
  4556. I915_WRITE(RENCLK_GATE_D1, 0);
  4557. I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
  4558. GS_UNIT_CLOCK_GATE_DISABLE |
  4559. CL_UNIT_CLOCK_GATE_DISABLE);
  4560. I915_WRITE(RAMCLK_GATE_D, 0);
  4561. dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
  4562. OVRUNIT_CLOCK_GATE_DISABLE |
  4563. OVCUNIT_CLOCK_GATE_DISABLE;
  4564. if (IS_GM45(dev))
  4565. dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
  4566. I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
  4567. /* WaDisableRenderCachePipelinedFlush */
  4568. I915_WRITE(CACHE_MODE_0,
  4569. _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
  4570. /* WaDisable_RenderCache_OperationalFlush:g4x */
  4571. I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4572. g4x_disable_trickle_feed(dev);
  4573. }
  4574. static void crestline_init_clock_gating(struct drm_device *dev)
  4575. {
  4576. struct drm_i915_private *dev_priv = dev->dev_private;
  4577. I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
  4578. I915_WRITE(RENCLK_GATE_D2, 0);
  4579. I915_WRITE(DSPCLK_GATE_D, 0);
  4580. I915_WRITE(RAMCLK_GATE_D, 0);
  4581. I915_WRITE16(DEUC, 0);
  4582. I915_WRITE(MI_ARB_STATE,
  4583. _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
  4584. /* WaDisable_RenderCache_OperationalFlush:gen4 */
  4585. I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4586. }
  4587. static void broadwater_init_clock_gating(struct drm_device *dev)
  4588. {
  4589. struct drm_i915_private *dev_priv = dev->dev_private;
  4590. I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
  4591. I965_RCC_CLOCK_GATE_DISABLE |
  4592. I965_RCPB_CLOCK_GATE_DISABLE |
  4593. I965_ISC_CLOCK_GATE_DISABLE |
  4594. I965_FBC_CLOCK_GATE_DISABLE);
  4595. I915_WRITE(RENCLK_GATE_D2, 0);
  4596. I915_WRITE(MI_ARB_STATE,
  4597. _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
  4598. /* WaDisable_RenderCache_OperationalFlush:gen4 */
  4599. I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
  4600. }
  4601. static void gen3_init_clock_gating(struct drm_device *dev)
  4602. {
  4603. struct drm_i915_private *dev_priv = dev->dev_private;
  4604. u32 dstate = I915_READ(D_STATE);
  4605. dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
  4606. DSTATE_DOT_CLOCK_GATING;
  4607. I915_WRITE(D_STATE, dstate);
  4608. if (IS_PINEVIEW(dev))
  4609. I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
  4610. /* IIR "flip pending" means done if this bit is set */
  4611. I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
  4612. /* interrupts should cause a wake up from C3 */
  4613. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
  4614. /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
  4615. I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
  4616. }
  4617. static void i85x_init_clock_gating(struct drm_device *dev)
  4618. {
  4619. struct drm_i915_private *dev_priv = dev->dev_private;
  4620. I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
  4621. /* interrupts should cause a wake up from C3 */
  4622. I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
  4623. _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
  4624. }
  4625. static void i830_init_clock_gating(struct drm_device *dev)
  4626. {
  4627. struct drm_i915_private *dev_priv = dev->dev_private;
  4628. I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
  4629. }
  4630. void intel_init_clock_gating(struct drm_device *dev)
  4631. {
  4632. struct drm_i915_private *dev_priv = dev->dev_private;
  4633. dev_priv->display.init_clock_gating(dev);
  4634. }
  4635. void intel_suspend_hw(struct drm_device *dev)
  4636. {
  4637. if (HAS_PCH_LPT(dev))
  4638. lpt_suspend_hw(dev);
  4639. }
  4640. #define for_each_power_well(i, power_well, domain_mask, power_domains) \
  4641. for (i = 0; \
  4642. i < (power_domains)->power_well_count && \
  4643. ((power_well) = &(power_domains)->power_wells[i]); \
  4644. i++) \
  4645. if ((power_well)->domains & (domain_mask))
  4646. #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
  4647. for (i = (power_domains)->power_well_count - 1; \
  4648. i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
  4649. i--) \
  4650. if ((power_well)->domains & (domain_mask))
  4651. /**
  4652. * We should only use the power well if we explicitly asked the hardware to
  4653. * enable it, so check if it's enabled and also check if we've requested it to
  4654. * be enabled.
  4655. */
  4656. static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
  4657. struct i915_power_well *power_well)
  4658. {
  4659. return I915_READ(HSW_PWR_WELL_DRIVER) ==
  4660. (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
  4661. }
  4662. bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
  4663. enum intel_display_power_domain domain)
  4664. {
  4665. struct i915_power_domains *power_domains;
  4666. struct i915_power_well *power_well;
  4667. bool is_enabled;
  4668. int i;
  4669. if (dev_priv->pm.suspended)
  4670. return false;
  4671. power_domains = &dev_priv->power_domains;
  4672. is_enabled = true;
  4673. for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
  4674. if (power_well->always_on)
  4675. continue;
  4676. if (!power_well->hw_enabled) {
  4677. is_enabled = false;
  4678. break;
  4679. }
  4680. }
  4681. return is_enabled;
  4682. }
  4683. bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
  4684. enum intel_display_power_domain domain)
  4685. {
  4686. struct i915_power_domains *power_domains;
  4687. bool ret;
  4688. power_domains = &dev_priv->power_domains;
  4689. mutex_lock(&power_domains->lock);
  4690. ret = intel_display_power_enabled_unlocked(dev_priv, domain);
  4691. mutex_unlock(&power_domains->lock);
  4692. return ret;
  4693. }
  4694. /*
  4695. * Starting with Haswell, we have a "Power Down Well" that can be turned off
  4696. * when not needed anymore. We have 4 registers that can request the power well
  4697. * to be enabled, and it will only be disabled if none of the registers is
  4698. * requesting it to be enabled.
  4699. */
  4700. static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
  4701. {
  4702. struct drm_device *dev = dev_priv->dev;
  4703. unsigned long irqflags;
  4704. /*
  4705. * After we re-enable the power well, if we touch VGA register 0x3d5
  4706. * we'll get unclaimed register interrupts. This stops after we write
  4707. * anything to the VGA MSR register. The vgacon module uses this
  4708. * register all the time, so if we unbind our driver and, as a
  4709. * consequence, bind vgacon, we'll get stuck in an infinite loop at
  4710. * console_unlock(). So make here we touch the VGA MSR register, making
  4711. * sure vgacon can keep working normally without triggering interrupts
  4712. * and error messages.
  4713. */
  4714. vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
  4715. outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
  4716. vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
  4717. if (IS_BROADWELL(dev)) {
  4718. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  4719. I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
  4720. dev_priv->de_irq_mask[PIPE_B]);
  4721. I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
  4722. ~dev_priv->de_irq_mask[PIPE_B] |
  4723. GEN8_PIPE_VBLANK);
  4724. I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
  4725. dev_priv->de_irq_mask[PIPE_C]);
  4726. I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
  4727. ~dev_priv->de_irq_mask[PIPE_C] |
  4728. GEN8_PIPE_VBLANK);
  4729. POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
  4730. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  4731. }
  4732. }
  4733. static void hsw_set_power_well(struct drm_i915_private *dev_priv,
  4734. struct i915_power_well *power_well, bool enable)
  4735. {
  4736. bool is_enabled, enable_requested;
  4737. uint32_t tmp;
  4738. tmp = I915_READ(HSW_PWR_WELL_DRIVER);
  4739. is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
  4740. enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
  4741. if (enable) {
  4742. if (!enable_requested)
  4743. I915_WRITE(HSW_PWR_WELL_DRIVER,
  4744. HSW_PWR_WELL_ENABLE_REQUEST);
  4745. if (!is_enabled) {
  4746. DRM_DEBUG_KMS("Enabling power well\n");
  4747. if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
  4748. HSW_PWR_WELL_STATE_ENABLED), 20))
  4749. DRM_ERROR("Timeout enabling power well\n");
  4750. }
  4751. hsw_power_well_post_enable(dev_priv);
  4752. } else {
  4753. if (enable_requested) {
  4754. I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
  4755. POSTING_READ(HSW_PWR_WELL_DRIVER);
  4756. DRM_DEBUG_KMS("Requesting to disable the power well\n");
  4757. }
  4758. }
  4759. }
  4760. static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
  4761. struct i915_power_well *power_well)
  4762. {
  4763. hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
  4764. /*
  4765. * We're taking over the BIOS, so clear any requests made by it since
  4766. * the driver is in charge now.
  4767. */
  4768. if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
  4769. I915_WRITE(HSW_PWR_WELL_BIOS, 0);
  4770. }
  4771. static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
  4772. struct i915_power_well *power_well)
  4773. {
  4774. hsw_set_power_well(dev_priv, power_well, true);
  4775. }
  4776. static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
  4777. struct i915_power_well *power_well)
  4778. {
  4779. hsw_set_power_well(dev_priv, power_well, false);
  4780. }
  4781. static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
  4782. struct i915_power_well *power_well)
  4783. {
  4784. }
  4785. static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
  4786. struct i915_power_well *power_well)
  4787. {
  4788. return true;
  4789. }
  4790. void __vlv_set_power_well(struct drm_i915_private *dev_priv,
  4791. enum punit_power_well power_well_id, bool enable)
  4792. {
  4793. struct drm_device *dev = dev_priv->dev;
  4794. u32 mask;
  4795. u32 state;
  4796. u32 ctrl;
  4797. enum pipe pipe;
  4798. if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
  4799. if (enable) {
  4800. /*
  4801. * Enable the CRI clock source so we can get at the
  4802. * display and the reference clock for VGA
  4803. * hotplug / manual detection.
  4804. */
  4805. I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
  4806. DPLL_REFA_CLK_ENABLE_VLV |
  4807. DPLL_INTEGRATED_CRI_CLK_VLV);
  4808. udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
  4809. } else {
  4810. for_each_pipe(pipe)
  4811. assert_pll_disabled(dev_priv, pipe);
  4812. /* Assert common reset */
  4813. I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
  4814. ~DPIO_CMNRST);
  4815. }
  4816. }
  4817. mask = PUNIT_PWRGT_MASK(power_well_id);
  4818. state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  4819. PUNIT_PWRGT_PWR_GATE(power_well_id);
  4820. mutex_lock(&dev_priv->rps.hw_lock);
  4821. #define COND \
  4822. ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
  4823. if (COND)
  4824. goto out;
  4825. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
  4826. ctrl &= ~mask;
  4827. ctrl |= state;
  4828. vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
  4829. if (wait_for(COND, 100))
  4830. DRM_ERROR("timout setting power well state %08x (%08x)\n",
  4831. state,
  4832. vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
  4833. #undef COND
  4834. out:
  4835. mutex_unlock(&dev_priv->rps.hw_lock);
  4836. /*
  4837. * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
  4838. * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
  4839. * a. GUnit 0x2110 bit[0] set to 1 (def 0)
  4840. * b. The other bits such as sfr settings / modesel may all
  4841. * be set to 0.
  4842. *
  4843. * This should only be done on init and resume from S3 with
  4844. * both PLLs disabled, or we risk losing DPIO and PLL
  4845. * synchronization.
  4846. */
  4847. if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
  4848. I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
  4849. }
  4850. static void vlv_set_power_well(struct drm_i915_private *dev_priv,
  4851. struct i915_power_well *power_well, bool enable)
  4852. {
  4853. enum punit_power_well power_well_id = power_well->data;
  4854. __vlv_set_power_well(dev_priv, power_well_id, enable);
  4855. }
  4856. static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
  4857. struct i915_power_well *power_well)
  4858. {
  4859. vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
  4860. }
  4861. static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
  4862. struct i915_power_well *power_well)
  4863. {
  4864. vlv_set_power_well(dev_priv, power_well, true);
  4865. }
  4866. static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
  4867. struct i915_power_well *power_well)
  4868. {
  4869. vlv_set_power_well(dev_priv, power_well, false);
  4870. }
  4871. static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
  4872. struct i915_power_well *power_well)
  4873. {
  4874. int power_well_id = power_well->data;
  4875. bool enabled = false;
  4876. u32 mask;
  4877. u32 state;
  4878. u32 ctrl;
  4879. mask = PUNIT_PWRGT_MASK(power_well_id);
  4880. ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
  4881. mutex_lock(&dev_priv->rps.hw_lock);
  4882. state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
  4883. /*
  4884. * We only ever set the power-on and power-gate states, anything
  4885. * else is unexpected.
  4886. */
  4887. WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
  4888. state != PUNIT_PWRGT_PWR_GATE(power_well_id));
  4889. if (state == ctrl)
  4890. enabled = true;
  4891. /*
  4892. * A transient state at this point would mean some unexpected party
  4893. * is poking at the power controls too.
  4894. */
  4895. ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
  4896. WARN_ON(ctrl != state);
  4897. mutex_unlock(&dev_priv->rps.hw_lock);
  4898. return enabled;
  4899. }
  4900. static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
  4901. struct i915_power_well *power_well)
  4902. {
  4903. WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
  4904. vlv_set_power_well(dev_priv, power_well, true);
  4905. spin_lock_irq(&dev_priv->irq_lock);
  4906. valleyview_enable_display_irqs(dev_priv);
  4907. spin_unlock_irq(&dev_priv->irq_lock);
  4908. /*
  4909. * During driver initialization/resume we can avoid restoring the
  4910. * part of the HW/SW state that will be inited anyway explicitly.
  4911. */
  4912. if (dev_priv->power_domains.initializing)
  4913. return;
  4914. intel_hpd_init(dev_priv->dev);
  4915. i915_redisable_vga_power_on(dev_priv->dev);
  4916. }
  4917. static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
  4918. struct i915_power_well *power_well)
  4919. {
  4920. WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
  4921. spin_lock_irq(&dev_priv->irq_lock);
  4922. valleyview_disable_display_irqs(dev_priv);
  4923. spin_unlock_irq(&dev_priv->irq_lock);
  4924. vlv_set_power_well(dev_priv, power_well, false);
  4925. }
  4926. static void check_power_well_state(struct drm_i915_private *dev_priv,
  4927. struct i915_power_well *power_well)
  4928. {
  4929. bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
  4930. if (power_well->always_on || !i915.disable_power_well) {
  4931. if (!enabled)
  4932. goto mismatch;
  4933. return;
  4934. }
  4935. if (enabled != (power_well->count > 0))
  4936. goto mismatch;
  4937. return;
  4938. mismatch:
  4939. WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
  4940. power_well->name, power_well->always_on, enabled,
  4941. power_well->count, i915.disable_power_well);
  4942. }
  4943. void intel_display_power_get(struct drm_i915_private *dev_priv,
  4944. enum intel_display_power_domain domain)
  4945. {
  4946. struct i915_power_domains *power_domains;
  4947. struct i915_power_well *power_well;
  4948. int i;
  4949. intel_runtime_pm_get(dev_priv);
  4950. power_domains = &dev_priv->power_domains;
  4951. mutex_lock(&power_domains->lock);
  4952. for_each_power_well(i, power_well, BIT(domain), power_domains) {
  4953. if (!power_well->count++) {
  4954. DRM_DEBUG_KMS("enabling %s\n", power_well->name);
  4955. power_well->ops->enable(dev_priv, power_well);
  4956. power_well->hw_enabled = true;
  4957. }
  4958. check_power_well_state(dev_priv, power_well);
  4959. }
  4960. power_domains->domain_use_count[domain]++;
  4961. mutex_unlock(&power_domains->lock);
  4962. }
  4963. void intel_display_power_put(struct drm_i915_private *dev_priv,
  4964. enum intel_display_power_domain domain)
  4965. {
  4966. struct i915_power_domains *power_domains;
  4967. struct i915_power_well *power_well;
  4968. int i;
  4969. power_domains = &dev_priv->power_domains;
  4970. mutex_lock(&power_domains->lock);
  4971. WARN_ON(!power_domains->domain_use_count[domain]);
  4972. power_domains->domain_use_count[domain]--;
  4973. for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
  4974. WARN_ON(!power_well->count);
  4975. if (!--power_well->count && i915.disable_power_well) {
  4976. DRM_DEBUG_KMS("disabling %s\n", power_well->name);
  4977. power_well->hw_enabled = false;
  4978. power_well->ops->disable(dev_priv, power_well);
  4979. }
  4980. check_power_well_state(dev_priv, power_well);
  4981. }
  4982. mutex_unlock(&power_domains->lock);
  4983. intel_runtime_pm_put(dev_priv);
  4984. }
  4985. static struct i915_power_domains *hsw_pwr;
  4986. /* Display audio driver power well request */
  4987. int i915_request_power_well(void)
  4988. {
  4989. struct drm_i915_private *dev_priv;
  4990. if (!hsw_pwr)
  4991. return -ENODEV;
  4992. dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  4993. power_domains);
  4994. intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
  4995. return 0;
  4996. }
  4997. EXPORT_SYMBOL_GPL(i915_request_power_well);
  4998. /* Display audio driver power well release */
  4999. int i915_release_power_well(void)
  5000. {
  5001. struct drm_i915_private *dev_priv;
  5002. if (!hsw_pwr)
  5003. return -ENODEV;
  5004. dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  5005. power_domains);
  5006. intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
  5007. return 0;
  5008. }
  5009. EXPORT_SYMBOL_GPL(i915_release_power_well);
  5010. /*
  5011. * Private interface for the audio driver to get CDCLK in kHz.
  5012. *
  5013. * Caller must request power well using i915_request_power_well() prior to
  5014. * making the call.
  5015. */
  5016. int i915_get_cdclk_freq(void)
  5017. {
  5018. struct drm_i915_private *dev_priv;
  5019. if (!hsw_pwr)
  5020. return -ENODEV;
  5021. dev_priv = container_of(hsw_pwr, struct drm_i915_private,
  5022. power_domains);
  5023. return intel_ddi_get_cdclk_freq(dev_priv);
  5024. }
  5025. EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (           \
        BIT(POWER_DOMAIN_PIPE_A) |              \
        BIT(POWER_DOMAIN_TRANSCODER_EDP) |      \
        BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_CRT) |            \
        BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (                             \
        (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
        BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (           \
        HSW_ALWAYS_ON_POWER_DOMAINS |           \
        BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (                             \
        (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
        BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS     BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS       POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
        BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
        BIT(POWER_DOMAIN_PORT_CRT) |            \
        BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
        BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
        BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
        BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
        BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
        BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
        BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
        BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
        BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
        BIT(POWER_DOMAIN_INIT))
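
/*
 * A well feeds a domain when the well's domain mask intersects
 * BIT(domain). Sketch of the predicate that the for_each_power_well()
 * iterators used earlier in this file are assumed to apply when walking
 * the well lists:
 */
static inline bool __maybe_unused
example_power_well_feeds_domain(struct i915_power_well *power_well,
                                enum intel_display_power_domain domain)
{
        return (power_well->domains & BIT(domain)) != 0;
}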
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
        .sync_hw = i9xx_always_on_power_well_noop,
        .enable = i9xx_always_on_power_well_noop,
        .disable = i9xx_always_on_power_well_noop,
        .is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
                .ops = &i9xx_always_on_power_well_ops,
        },
};
static const struct i915_power_well_ops hsw_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = hsw_power_well_enable,
        .disable = hsw_power_well_disable,
        .is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = HSW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};

static struct i915_power_well bdw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = BDW_DISPLAY_POWER_DOMAINS,
                .ops = &hsw_power_well_ops,
        },
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
        .sync_hw = vlv_power_well_sync_hw,
        .enable = vlv_display_power_well_enable,
        .disable = vlv_display_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .sync_hw = vlv_power_well_sync_hw,
        .enable = vlv_power_well_enable,
        .disable = vlv_power_well_disable,
        .is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
                .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
                .domains = VLV_DISPLAY_POWER_DOMAINS,
                .data = PUNIT_POWER_WELL_DISP2D,
                .ops = &vlv_display_power_well_ops,
        },
        {
                .name = "dpio-tx-b-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
        },
        {
                .name = "dpio-tx-b-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
        },
        {
                .name = "dpio-tx-c-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
        },
        {
                .name = "dpio-tx-c-23",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
                           VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
                .ops = &vlv_dpio_power_well_ops,
                .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
        {
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
                .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
                .ops = &vlv_dpio_power_well_ops,
        },
};
/* A macro rather than a function so ARRAY_SIZE() still sees the array type. */
#define set_power_wells(power_domains, __power_wells) ({                \
        (power_domains)->power_wells = (__power_wells);                 \
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
})
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        mutex_init(&power_domains->lock);

        /*
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
        if (IS_HASWELL(dev_priv->dev)) {
                set_power_wells(power_domains, hsw_power_wells);
                hsw_pwr = power_domains;
        } else if (IS_BROADWELL(dev_priv->dev)) {
                set_power_wells(power_domains, bdw_power_wells);
                hsw_pwr = power_domains;
        } else if (IS_VALLEYVIEW(dev_priv->dev)) {
                set_power_wells(power_domains, vlv_power_wells);
        } else {
                set_power_wells(power_domains, i9xx_always_on_power_well);
        }

        return 0;
}
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
        hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
        int i;

        mutex_lock(&power_domains->lock);
        for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
                power_well->ops->sync_hw(dev_priv, power_well);
                power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
                                                                     power_well);
        }
        mutex_unlock(&power_domains->lock);
}
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        power_domains->initializing = true;
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev_priv, true);
        intel_power_domains_resume(dev_priv);
        power_domains->initializing = false;
}
/* Thin wrappers so AUX display code takes a bare runtime-PM reference. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
        intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
        intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct device *device = &dev->pdev->dev;

        if (!HAS_RUNTIME_PM(dev))
                return;

        pm_runtime_get_sync(device);
        WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct device *device = &dev->pdev->dev;

        if (!HAS_RUNTIME_PM(dev))
                return;

        WARN(dev_priv->pm.suspended, "Getting noresume-ref while suspended.\n");
        pm_runtime_get_noresume(device);
}

void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct device *device = &dev->pdev->dev;

        if (!HAS_RUNTIME_PM(dev))
                return;

        pm_runtime_mark_last_busy(device);
        pm_runtime_put_autosuspend(device);
}
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct device *device = &dev->pdev->dev;

        if (!HAS_RUNTIME_PM(dev))
                return;

        pm_runtime_set_active(device);

        /*
         * RPM depends on RC6 to save/restore the GT HW context, so make
         * RC6 a requirement.
         */
        if (!intel_enable_rc6(dev)) {
                DRM_INFO("RC6 disabled, disabling runtime PM support\n");
                return;
        }

        pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
        pm_runtime_mark_last_busy(device);
        pm_runtime_use_autosuspend(device);

        pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct device *device = &dev->pdev->dev;

        if (!HAS_RUNTIME_PM(dev))
                return;

        if (!intel_enable_rc6(dev))
                return;

        /* Make sure we're not suspended first. */
        pm_runtime_get_sync(device);
        pm_runtime_disable(device);
}
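
/*
 * Illustrative only: the reference discipline expected between
 * intel_init_runtime_pm() and intel_fini_runtime_pm(). Any hardware
 * access outside a display power domain reference should be bracketed
 * like this; the device then autosuspends roughly 10s after the last put.
 */
static u32 __maybe_unused
example_runtime_pm_read(struct drm_i915_private *dev_priv, u32 reg)
{
        u32 val;

        intel_runtime_pm_get(dev_priv);
        val = I915_READ(reg);
        intel_runtime_pm_put(dev_priv);

        return val;
}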
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FBC(dev)) {
                if (INTEL_INFO(dev)->gen >= 7) {
                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
                        dev_priv->display.enable_fbc = gen7_enable_fbc;
                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
                } else if (INTEL_INFO(dev)->gen >= 5) {
                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
                        dev_priv->display.enable_fbc = ironlake_enable_fbc;
                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
                } else if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
                } else {
                        dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
                        dev_priv->display.enable_fbc = i8xx_enable_fbc;
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;

                        /* This value was pulled out of someone's hat */
                        I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
                }
        }
        /* For cxsr */
        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);

        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);

                if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
                     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
                    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
                     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
                        dev_priv->display.update_wm = ilk_update_wm;
                        dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
                } else {
                        DRM_DEBUG_KMS("Failed to read display plane latency. "
                                      "Disabling CxSR\n");
                }

                if (IS_GEN5(dev))
                        dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
                else if (IS_GEN6(dev))
                        dev_priv->display.init_clock_gating = gen6_init_clock_gating;
                else if (IS_IVYBRIDGE(dev))
                        dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
                else if (IS_HASWELL(dev))
                        dev_priv->display.init_clock_gating = haswell_init_clock_gating;
                else if (INTEL_INFO(dev)->gen == 8)
                        dev_priv->display.init_clock_gating = gen8_init_clock_gating;
        } else if (IS_CHERRYVIEW(dev)) {
                dev_priv->display.update_wm = valleyview_update_wm;
                dev_priv->display.init_clock_gating =
                        cherryview_init_clock_gating;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.update_wm = valleyview_update_wm;
                dev_priv->display.init_clock_gating =
                        valleyview_init_clock_gating;
        } else if (IS_PINEVIEW(dev)) {
                if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                                            dev_priv->is_ddr3,
                                            dev_priv->fsb_freq,
                                            dev_priv->mem_freq)) {
                        DRM_INFO("failed to find known CxSR latency "
                                 "(found ddr%s fsb freq %d, mem freq %d), "
                                 "disabling CxSR\n",
                                 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
                        pineview_disable_cxsr(dev);
                        dev_priv->display.update_wm = NULL;
                } else {
                        dev_priv->display.update_wm = pineview_update_wm;
                }
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_G4X(dev)) {
                dev_priv->display.update_wm = g4x_update_wm;
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
        } else if (IS_GEN4(dev)) {
                dev_priv->display.update_wm = i965_update_wm;
                if (IS_CRESTLINE(dev))
                        dev_priv->display.init_clock_gating = crestline_init_clock_gating;
                else if (IS_BROADWATER(dev))
                        dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
        } else if (IS_GEN3(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_GEN2(dev)) {
                if (INTEL_INFO(dev)->num_pipes == 1) {
                        dev_priv->display.update_wm = i845_update_wm;
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
                } else {
                        dev_priv->display.update_wm = i9xx_update_wm;
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
                }

                if (IS_I85X(dev) || IS_I865G(dev))
                        dev_priv->display.init_clock_gating = i85x_init_clock_gating;
                else
                        dev_priv->display.init_clock_gating = i830_init_clock_gating;
        } else {
                DRM_ERROR("unexpected fall-through in intel_init_pm\n");
        }
}
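
/*
 * Illustrative only: a sketch of how the hooks installed above are
 * consumed by the rest of the driver (in the spirit of the
 * intel_update_watermarks()-style call sites, not the exact code).
 */
static void __maybe_unused example_update_wm(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->dev->dev_private;

        if (dev_priv->display.update_wm)
                dev_priv->display.update_wm(crtc);
}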
/* Note: *val carries an optional parameter in and the mailbox result out. */
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
                return -EAGAIN;
        }

        I915_WRITE(GEN6_PCODE_DATA, *val);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500)) {
                DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }

        *val = I915_READ(GEN6_PCODE_DATA);
        I915_WRITE(GEN6_PCODE_DATA, 0);

        return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
                return -EAGAIN;
        }

        I915_WRITE(GEN6_PCODE_DATA, val);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500)) {
                DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }

        I915_WRITE(GEN6_PCODE_DATA, 0);

        return 0;
}
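
/*
 * Illustrative only: pcode mailbox accesses must be serialized under
 * rps.hw_lock, as the WARN_ON above enforces. GEN6_PCODE_WRITE_MIN_FREQ_TABLE
 * is used purely as an example mailbox command here.
 */
static void __maybe_unused
example_pcode_poke(struct drm_i915_private *dev_priv, u32 val)
{
        mutex_lock(&dev_priv->rps.hw_lock);
        if (sandybridge_pcode_write(dev_priv,
                                    GEN6_PCODE_WRITE_MIN_FREQ_TABLE, val))
                DRM_DEBUG_DRIVER("example pcode write failed\n");
        mutex_unlock(&dev_priv->rps.hw_lock);
}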
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
        int div;

        /* 4 x czclk */
        switch (dev_priv->mem_freq) {
        case 800:
                div = 10;
                break;
        case 1066:
                div = 12;
                break;
        case 1333:
                div = 16;
                break;
        default:
                return -1;
        }

        return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
        int mul;

        /* 4 x czclk */
        switch (dev_priv->mem_freq) {
        case 800:
                mul = 10;
                break;
        case 1066:
                mul = 12;
                break;
        case 1333:
                mul = 16;
                break;
        default:
                return -1;
        }

        return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
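
/*
 * Worked example (assuming mem_freq == 1066, so div == mul == 12):
 *
 *   vlv_gpu_freq(0xd5)   = ROUND(1066 * (0xd5 + 6 - 0xbd) / 48)
 *                        = ROUND(1066 * 30 / 48) = 666 (MHz)
 *   vlv_freq_opcode(666) = ROUND(48 * 666 / 1066) + 0xbd - 6
 *                        = 30 + 0xb7 = 0xd5
 *
 * i.e. the two conversions round-trip for in-range opcodes.
 */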
void intel_pm_setup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_init(&dev_priv->rps.hw_lock);

        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);

        dev_priv->pm.suspended = false;
        dev_priv->pm.irqs_disabled = false;
}