intel_pm.c

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
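
/*
 * Illustrative sketch (not part of the driver): the flags above are OR'ed
 * into a single mask describing the allowed RC6 states, e.g. a hypothetical
 * mask permitting RC6 and deep RC6 but not the deepest state:
 */
#if 0
static const int example_rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
#endif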
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter
 */
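/*
 * Usage note (assumption, not from this file): with the module parameter
 * above, FBC can be requested at module load time, e.g.:
 *   modprobe i915 enable_fbc=1
 */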
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * WaDisableSDEUnitClockGating:skl
	 * This seems to be a pre-production w/a.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
	 * This is a pre-production w/a.
	 */
	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
		   ~GEN9_DG_MIRROR_FIX_ENABLE);

	/* Wa4x4STCOptimizationDisable:skl */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
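
/*
 * Worked example (illustrative): with a 2048-byte framebuffer pitch, the
 * stride field above encodes 2048 / 32 - 1 = 63 on gen2 (32B units) and
 * 2048 / 64 - 1 = 31 on later generations (64B units).
 */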
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param in other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
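
/*
 * Note (illustrative): ECOSKPD is a masked register, so the three writes
 * above first set the mask bit (the FBC_NOTIFY bit shifted into the high
 * half by GEN6_BLITTER_LOCK_SHIFT), then set the bit itself, then clear
 * the mask again so that later writes do not disturb it.
 */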
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->fbc.enabled;
}

void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	if (!intel_fbc_enabled(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
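
/*
 * Note (illustrative): the helper below returns true only when the reason
 * actually changes, so each "disabling" debug message in intel_update_fbc()
 * is emitted once per transition rather than on every call.
 */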
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}

	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
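
/*
 * Worked example (illustrative): for a desktop part with DDR3 memory, an
 * 800 MHz FSB and 667 MHz memory, intel_get_cxsr_latency(1, 1, 800, 667)
 * returns the {1, 1, 800, 667, 6420, 36420, 6873, 36873} row above.
 */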
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
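
/*
 * Note (illustrative): on 945 and 915GM the self-refresh enable lives in a
 * masked register; _MASKED_BIT_ENABLE(bit) expands to ((bit) << 16) | (bit),
 * so the high 16 bits select which bits the write actually modifies.
 */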
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
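
/*
 * Worked example (illustrative): DSPARB holds FIFO split points, so if the
 * low field reads 64 and the C-start field reads 96, plane A gets 64 FIFO
 * entries and plane B gets 96 - 64 = 32.
 */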
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

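/*
 * Worked example for intel_calculate_wm() (illustrative numbers only, not
 * taken from any real platform): with clock_in_khz = 100000 (100 MHz),
 * pixel_size = 4 and latency_ns = 5000, the FIFO drains at 100 * 4 = 400
 * bytes/us, so covering 5 us of latency needs 2000 bytes of headroom.
 * Assuming a 64-byte cacheline, that is DIV_ROUND_UP(2000, 64) = 32
 * entries; with a hypothetical 96-entry FIFO and a guard of 2, the
 * watermark would be 96 - (32 + 2) = 62 entries.
 */
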
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

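/*
 * Worked example for the small buffer method above (illustrative numbers
 * only): with clock = 148500 kHz, pixel_size = 4 and a latency of 5000 ns,
 * entries = (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes. For a
 * 1920-wide mode, hdisplay * 8 typically exceeds
 * fifo_size * cacheline_size, so tlb_miss is negative and ignored;
 * assuming a 64-byte cacheline the plane watermark then comes out to
 * DIV_ROUND_UP(2970, 64) + guard_size = 47 + guard_size entries.
 */
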
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	struct drm_device *dev = crtc->dev;
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	if (IS_CHERRYVIEW(dev))
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
					       DRAIN_LATENCY_PRECISION_16;
	else
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
					       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}

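/*
 * Worked example for vlv_compute_drain_latency() (illustrative numbers
 * only): with clock = 148500 kHz and pixel_size = 4,
 * entries = DIV_ROUND_UP(148500, 1000) * 4 = 596. That is above the
 * 128-entry cutoff, so on VLV the 64x precision multiplier is selected and
 * drain_latency = (64 * 64 * 4) / 596 = 27 (integer division), which would
 * then be clamped to DRAIN_LATENCY_MASK if it exceeded the field width.
 */
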
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_PLANE_PRECISION_HIGH :
					   DDL_PLANE_PRECISION_LOW;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_CURSOR_PRECISION_HIGH :
					   DDL_CURSOR_PRECISION_LOW;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}

#define single_plane_enabled(mask) is_power_of_2(mask)

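/*
 * Note (added for clarity): is_power_of_2() is true only for a mask with
 * exactly one bit set, so e.g. 1 << PIPE_A or 1 << PIPE_B counts as a
 * single enabled plane, while 0 or (1 << PIPE_A) | (1 << PIPE_B) does not.
 */
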
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_SPRITE_PRECISION_HIGH(sprite) :
					   DDL_SPRITE_PRECISION_LOW(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc SR entries for single plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc SR entries for single plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

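/*
 * Worked example for ilk_wm_method1() (illustrative numbers only): with
 * pixel_rate = 148500 kHz, bytes_per_pixel = 4 and latency = 35 (3.5 us in
 * 0.1 us units), ret = 148500 * 4 * 35 = 20790000; divided by 64 * 10000
 * and rounded up that is 33, plus 2 gives a watermark of 35 entries.
 */
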
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

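/*
 * Worked example for ilk_wm_method2() (illustrative numbers only): with
 * latency = 35, pixel_rate = 148500 kHz and pipe_htotal = 2200,
 * (35 * 148500) / (2200 * 10000) = 0 full lines, so one partial line is
 * accounted for; at horiz_pixels = 1920 and 4 bytes per pixel that is
 * 1 * 1920 * 4 = 7680 bytes, i.e. DIV_ROUND_UP(7680, 64) + 2 = 122 entries.
 */
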
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

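/*
 * Worked example for ilk_wm_fbc() (illustrative numbers only, continuing
 * the method2 example above): with pri_val = 122, horiz_pixels = 1920 and
 * 4 bytes per pixel, DIV_ROUND_UP(122 * 64, 1920 * 4) + 2 = 2 + 2 = 4.
 */
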
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

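/*
 * Worked example for ilk_plane_wm_max() (illustrative numbers only): on a
 * gen7 part with a 768-entry LP FIFO, a single active pipe and sprites
 * enabled, the 1:1 split gives both primary and sprite 768 / 2 = 384
 * entries, while the 5/6 partitioning at level > 0 gives the sprite
 * 768 * 5 / 6 = 640 and the primary 768 / 6 = 128, each then clamped to
 * the register maximum for that level.
 */
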
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WM is computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

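/*
 * Worked example for hsw_compute_linetime_wm() (illustrative numbers only):
 * with crtc_htotal = 2200 and crtc_clock = 148500 kHz, one scanline takes
 * roughly 2200 / 148500 kHz ~= 14.81 us, and the programmed value is that
 * time multiplied by 8: DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119.
 */
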
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}

static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[5])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/* WM1+ latency values in 0.5us units */
		if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

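/*
 * Worked example for ilk_increase_wm_latency() (illustrative numbers only):
 * with min = 12 (1.2 us in 0.1 us units) and wm[0] below that, wm[0] is
 * raised to 12, while the WM1+ values, which are stored in 0.5 us units,
 * are raised to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.
 */
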
static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

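/*
 * Mapping example (follows directly from the expression above): when
 * pipe_wm->wm[4].enable is set, LP1/LP2/LP3 map to levels 1/3/4;
 * otherwise they map to levels 1/2/3.
 */
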
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

  2240. static void ilk_compute_wm_results(struct drm_device *dev,
  2241. const struct intel_pipe_wm *merged,
  2242. enum intel_ddb_partitioning partitioning,
  2243. struct ilk_wm_values *results)
  2244. {
  2245. struct intel_crtc *intel_crtc;
  2246. int level, wm_lp;
  2247. results->enable_fbc_wm = merged->fbc_wm_enabled;
  2248. results->partitioning = partitioning;
  2249. /* LP1+ register values */
  2250. for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
  2251. const struct intel_wm_level *r;
  2252. level = ilk_wm_lp_to_level(wm_lp, merged);
  2253. r = &merged->wm[level];
  2254. /*
  2255. * Maintain the watermark values even if the level is
  2256. * disabled. Doing otherwise could cause underruns.
  2257. */
  2258. results->wm_lp[wm_lp - 1] =
  2259. (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
  2260. (r->pri_val << WM1_LP_SR_SHIFT) |
  2261. r->cur_val;
  2262. if (r->enable)
  2263. results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
  2264. if (INTEL_INFO(dev)->gen >= 8)
  2265. results->wm_lp[wm_lp - 1] |=
  2266. r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
  2267. else
  2268. results->wm_lp[wm_lp - 1] |=
  2269. r->fbc_val << WM1_LP_FBC_SHIFT;
  2270. /*
  2271. * Always set WM1S_LP_EN when spr_val != 0, even if the
  2272. * level is disabled. Doing otherwise could cause underruns.
  2273. */
  2274. if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
  2275. WARN_ON(wm_lp != 1);
  2276. results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
  2277. } else
  2278. results->wm_lp_spr[wm_lp - 1] = r->spr_val;
  2279. }
  2280. /* LP0 register values */
  2281. for_each_intel_crtc(dev, intel_crtc) {
  2282. enum pipe pipe = intel_crtc->pipe;
  2283. const struct intel_wm_level *r =
  2284. &intel_crtc->wm.active.wm[0];
  2285. if (WARN_ON(!r->enable))
  2286. continue;
  2287. results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
  2288. results->wm_pipe[pipe] =
  2289. (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
  2290. (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
  2291. r->cur_val;
  2292. }
  2293. }
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

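/*
 * The resulting dirty-bit layout, as defined by the macros above
 * (assuming pipes A-C):
 *
 *	bits  0-2	WM_DIRTY_PIPE(A..C)
 *	bits  8-10	WM_DIRTY_LINETIME(A..C)
 *	bits 16-18	WM_DIRTY_LP(1..3)
 *	bit  24		WM_DIRTY_FBC
 *	bit  25		WM_DIRTY_DDB
 */
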
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

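/*
 * Note: _ilk_disable_lp_wm() only clears the WM1_LP_SR_EN bits; the latency
 * and watermark value fields stay programmed, consistent with the rule in
 * ilk_compute_wm_results() that values must be maintained even for disabled
 * levels to avoid underruns. Callers that need the disable to take effect
 * before proceeding (see WaCxSRDisabledForSpriteScaling below) use the
 * return value to decide whether to wait for a vblank.
 */
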
/*
 * The spec says we shouldn't write when we don't need to, because every
 * write causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc_active(crtc);

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

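/*
 * A worked example of the SR formula above, with hypothetical numbers:
 * take a 1920x1080 mode with htotal = 2200 and a 148.5 MHz dotclock,
 * 4 bytes per pixel, and an assumed 12 us latency:
 *
 *	line time = htotal / dotclock = 2200 / 148500000 s ~= 14.8 us
 *	watermark = (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 *
 * which then gets rounded up and padded with the 2 extra entries for
 * clock crossings before being programmed.
 */
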
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}

static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}

/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

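/*
 * Note on the handshake above: we first check that no previous command is
 * still pending (MEMCTL_CMD_STS set), issue the CHFREQ command with the
 * requested frequency, and then set the status bit ourselves to kick off
 * the command; hardware clears MEMCTL_CMD_STS again once the change has
 * completed, which is what ironlake_enable_drps() waits for below.
 */
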
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}

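/*
 * GEN6_RP_INTERRUPT_LIMITS packs the softlimits as (max << 24 | min << 16),
 * both in the platform's frequency units. With a hypothetical
 * max_freq_softlimit of 22 (0x16) and min_freq_softlimit of 7, a request at
 * the floor (val <= 7) yields 0x16070000, while any higher request yields
 * 0x16000000 with the down limit left clear, per the race described above.
 */
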
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}

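/*
 * Checking the LOW_POWER numbers above against the stated 1280ns units:
 * GEN6_RP_UP_EI = 12500 -> 12500 * 1.28us = 16ms, and GEN6_RP_UP_THRESHOLD
 * = 11800 -> 11800 / 12500 = 94.4% busy, i.e. the "more than 95% busy over
 * 16ms" comment rounds slightly. Likewise GEN6_RP_DOWN_EI = 25000 -> 32ms
 * with 21250 / 25000 = 85%.
 */
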
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
	mask &= dev_priv->pm_rps_events;

	/* IVB and SNB hard hangs on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (IS_GEN8(dev_priv->dev))
		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return ~mask;
}

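/*
 * Note the inversion on return: GEN6_PMINTRMSK is a mask register where a
 * set bit blocks the corresponding PM interrupt, so the bits accumulated
 * above are the events we want to keep unmasked for the current frequency
 * (no down interrupts at the floor, no up interrupts at the ceiling).
 */
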
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}

/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * If Gfx is idle, then:
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* Latest VLV doesn't need to force the gfx clock */
	if (dev->pdev->revision >= 0xd) {
		valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		return;
	}

	/*
	 * When we are idle, drop to the minimum voltage state.
	 */
	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupts so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);

	vlv_force_gfx_clock(dev_priv, true);

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
				& GENFREQSTATUS) == 0, 5))
		DRM_ERROR("timed out waiting for Punit\n");

	vlv_force_gfx_clock(dev_priv, false);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_CHERRYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		else if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	if (val != dev_priv->rps.cur_freq) {
		DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
				 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
				 dev_priv->rps.cur_freq,
				 vlv_gpu_freq(dev_priv, val), val);

		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
	}

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}

static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
				   ~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
	 * gen8_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
}

static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
				~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

	if (IS_BROADWELL(dev))
		gen8_disable_rps_interrupts(dev);
	else
		gen6_disable_rps_interrupts(dev);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen8_disable_rps_interrupts(dev);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * which is what the BIOS expects when going into suspend */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	gen6_disable_rps_interrupts(dev);
}

static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}

static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* RC6 is only on Ironlake mobile not on desktop */
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}

static void gen8_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq		= 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
	/* XXX: only BYT has a special efficient freq */
	dev_priv->rps.efficient_freq	= dev_priv->rps.rp1_freq;
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}

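/*
 * A decode example with a hypothetical RP_STATE_CAP value of 0x00040b12
 * (all fields in 50MHz units):
 *
 *	bits  0-7  = 0x12 -> RP0 = 18 -> 900 MHz (max, pre-overclock)
 *	bits  8-15 = 0x0b -> RP1 = 11 -> 550 MHz
 *	bits 16-23 = 0x04 -> RPn =  4 -> 200 MHz (min)
 */
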
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0, rp_state_cap;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */
	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

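/*
 * The RPS thresholds above are in 1.28us units, so the divisions by 128
 * convert from a 10ns count: GEN6_RP_DOWN_TIMEOUT = 100000000 / 128 =
 * 781250 -> 781250 * 1.28us = 1s, matching the "1 second timeout" comment,
 * and GEN6_RP_UP_THRESHOLD = 7600000 / 128 -> 76ms, as annotated.
 */
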
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rp_state_cap;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	gen6_enable_rps_interrupts(dev);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

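/*
 * A worked pass through the pre-Haswell branch above, with hypothetical
 * numbers: assume max_ia_freq = 3400 MHz, scaling_factor = 180,
 * max_freq_softlimit = 22 and gpu_freq = 16 (so diff = 6). Then
 * ia_freq = 3400 - ((6 * 180) / 2) = 2860, and
 * DIV_ROUND_CLOSEST(2860, 100) = 29, i.e. the PCU is asked to pair that
 * GPU frequency with a ~2.9 GHz IA reference, in 100 MHz units.
 */
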
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;

	return rp1;
}

static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
	rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

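/*
 * The RPe fuse value above appears to be split across two registers:
 * FB_GFX_FMAX_FUSE_LO supplies the low 5 bits and FB_GFX_FMAX_FUSE_HI the
 * remaining high bits, which is why the HI part is shifted left by 5
 * before being OR'ed in.
 */
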
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}

/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}
}

static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}

static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = 200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = 267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = 333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = 320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = 400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
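
/*
 * The numbered comments in cherryview_enable_rps() follow the hardware
 * bring-up sequence: take forcewake, program RC6 thresholds, enable RC6
 * (only when BIOS has populated the PCBR), program RPS thresholds, then
 * enable RPS and drop to the efficient frequency.
 */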
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);
	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
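
/*
 * VLV variant of the above: the power context is validated via
 * valleyview_check_pctx() rather than assumed from the BIOS PCBR, and
 * RC6 is enabled in timeout (TO) mode with parallel context reset
 * instead of EI mode.
 */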
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
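
/*
 * Ironlake RC6 needs two GEM objects pinned in the GGTT: a render
 * context image the hardware saves/restores on its own and a power
 * context that lets the render unit power down. The helpers below
 * allocate and release them; the enable path emits the MI_SET_CONTEXT
 * handshake pointing the hardware at the render context.
 */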
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}

static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}
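
/*
 * Decode a PXVFREQ entry into a frequency:
 *   freq = (div * 133333) / (2^post * pre)
 * where 133333 is presumably the 133 MHz reference clock expressed in
 * kHz; a zero pre-divider marks an unused entry.
 */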
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
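
/*
 * Coefficients for the chipset power estimate: a row is selected by
 * matching (i, t) against the current c_m / r_t configuration; m and c
 * are the slope and intercept of the empirically derived fit used by
 * __i915_chipset_val() below.
 */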
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
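
/*
 * Sample the DMI/DDR/CSI energy counters and turn the delta since the
 * last reading into a rate per millisecond, then apply the linear fit
 * from cparams: power = (m * rate + c) / 10.
 */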
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		{ 0, 0, },
		{ 375, 0, },
		{ 500, 0, },
		{ 625, 0, },
		{ 750, 0, },
		{ 875, 0, },
		{ 1000, 0, },
		{ 1125, 0, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4125, 3000, },
		{ 4250, 3125, },
		{ 4375, 3250, },
		{ 4500, 3375, },
		{ 4625, 3500, },
		{ 4750, 3625, },
		{ 4875, 3750, },
		{ 5000, 3875, },
		{ 5125, 4000, },
		{ 5250, 4125, },
		{ 5375, 4250, },
		{ 5500, 4375, },
		{ 5625, 4500, },
		{ 5750, 4625, },
		{ 5875, 4750, },
		{ 6000, 4875, },
		{ 6125, 5000, },
		{ 6250, 5125, },
		{ 6375, 5250, },
		{ 6500, 5375, },
		{ 6625, 5500, },
		{ 6750, 5625, },
		{ 6875, 5750, },
		{ 7000, 5875, },
		{ 7125, 6000, },
		{ 7250, 6125, },
		{ 7375, 6250, },
		{ 7500, 6375, },
		{ 7625, 6500, },
		{ 7750, 6625, },
		{ 7875, 6750, },
		{ 8000, 6875, },
		{ 8125, 7000, },
		{ 8250, 7125, },
		{ 8375, 7250, },
		{ 8500, 7375, },
		{ 8625, 7500, },
		{ 8750, 7625, },
		{ 8875, 7750, },
		{ 9000, 7875, },
		{ 9125, 8000, },
		{ 9250, 8125, },
		{ 9375, 8250, },
		{ 9500, 8375, },
		{ 9625, 8500, },
		{ 9750, 8625, },
		{ 9875, 8750, },
		{ 10000, 8875, },
		{ 10125, 9000, },
		{ 10250, 9125, },
		{ 10375, 9250, },
		{ 10500, 9375, },
		{ 10625, 9500, },
		{ 10750, 9625, },
		{ 10875, 9750, },
		{ 11000, 9875, },
		{ 11125, 10000, },
		{ 11250, 10125, },
		{ 11375, 10250, },
		{ 11500, 10375, },
		{ 11625, 10500, },
		{ 11750, 10625, },
		{ 11875, 10750, },
		{ 12000, 10875, },
		{ 12125, 11000, },
		{ 12250, 11125, },
		{ 12375, 11250, },
		{ 12500, 11375, },
		{ 12625, 11500, },
		{ 12750, 11625, },
		{ 12875, 11750, },
		{ 13000, 11875, },
		{ 13125, 12000, },
		{ 13250, 12125, },
		{ 13375, 12250, },
		{ 13500, 12375, },
		{ 13625, 12500, },
		{ 13750, 12625, },
		{ 13875, 12750, },
		{ 14000, 12875, },
		{ 14125, 13000, },
		{ 14250, 13125, },
		{ 14375, 13250, },
		{ 14500, 13375, },
		{ 14625, 13500, },
		{ 14750, 13625, },
		{ 14875, 13750, },
		{ 15000, 13875, },
		{ 15125, 14000, },
		{ 15250, 14125, },
		{ 15375, 14250, },
		{ 15500, 14375, },
		{ 15625, 14500, },
		{ 15750, 14625, },
		{ 15875, 14750, },
		{ 16000, 14875, },
		{ 16125, 15000, },
	};

	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
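
/*
 * Keep a running graphics power figure from the GFXEC energy counter:
 * the counter delta is scaled by 1181 / (elapsed ms * 10), another
 * empirically derived constant.
 */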
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
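
/*
 * Derive a graphics power estimate from the current P-state voltage and
 * the MCH thermal reading: one of three linear temperature corrections
 * is applied, scaled by the fused correction factor (ips.corr), and the
 * result is added to the counter-based figure maintained by
 * __i915_update_gfx_val().
 */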
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
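
/*
 * ILK frequency limits are delay values, where a smaller delay means a
 * higher frequency: hence raising the limit decrements max_delay towards
 * fmax, and lowering it increments max_delay towards min_delay.
 */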
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
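
/*
 * Program the energy monitor (EMON). The event weights are magic numbers
 * from hardware characterization; each P-state weight below scales as
 * vid^2 * freq and is normalized down to a byte (values over 0xff get
 * flagged as errors).
 */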
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}
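
/*
 * Everything from here down is per-platform clock gating setup. Most of
 * these register writes implement documented workarounds (the Wa* tags)
 * and are applied once via the init_clock_gating() display vfunc.
 */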
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
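
/*
 * Select the FBC vfuncs for this platform: gen5+ share the ironlake
 * query/disable paths (gen7 swaps in its own enable), GM45 uses the g4x
 * variants, and everything older falls back to the i8xx implementation.
 */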
static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = gen7_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = ironlake_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
		dev_priv->display.enable_fbc = g4x_enable_fbc;
		dev_priv->display.disable_fbc = g4x_disable_fbc;
	} else {
		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->display.enable_fbc = i8xx_enable_fbc;
		dev_priv->display.disable_fbc = i8xx_disable_fbc;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_fbc(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (IS_GEN9(dev)) {
		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);
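
		/*
		 * Editor's note: sanity-check the latencies just read. On
		 * ILK (gen5) ilk_setup_wm_latency() fills the LP0 sprite/
		 * cursor latencies with fixed values rather than reading
		 * them from the hardware, so WM[1] is the meaningful check
		 * there; later PCH platforms are checked via WM[0] instead.
		 */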
		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disabling CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
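
/*
 * Editor's note on the GEN6+ pcode mailbox protocol as implemented below:
 * the caller holds rps.hw_lock, the request payload is written to
 * GEN6_PCODE_DATA, then the command goes to GEN6_PCODE_MAILBOX with
 * GEN6_PCODE_READY set; the punit clears READY when it has consumed the
 * command, at which point the reply (for reads) sits in GEN6_PCODE_DATA.
 * A hypothetical caller, for illustration only (SOME_PCODE_MBOX_CMD is a
 * made-up name, not a real mailbox command):
 *
 *	u32 val = 0;
 *	int ret;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv, SOME_PCODE_MBOX_CMD, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Note that *val doubles as the request payload on reads, so the caller
 * must initialize it.
 */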
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
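
/*
 * Editor's note: VLV/CHV GPU frequencies are programmed as opcodes rather
 * than MHz. byt_gpu_freq()/byt_freq_opcode() below convert between the
 * two for Valleyview, scaling by a divider derived from the memory clock,
 * while the chv_* variants further down key off the CZ clock instead.
 */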
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
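
/*
 * Worked example for byt_gpu_freq(), for illustration: with mem_freq ==
 * 1066 (div == 12), opcode 0xbd maps to DIV_ROUND_CLOSEST(1066 * 6, 48)
 * = 133 MHz, and each opcode step above that adds roughly 1066/48 ~= 22
 * MHz.
 */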
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
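
/*
 * byt_freq_opcode() is the inverse mapping; modulo DIV_ROUND_CLOSEST
 * rounding, byt_freq_opcode(dev_priv, byt_gpu_freq(dev_priv, x)) == x.
 * Continuing the example above: 133 MHz at mem_freq 1066 gives
 * DIV_ROUND_CLOSEST(4 * 12 * 133, 1066) + 0xbd - 6 = 6 + 0xb7 = 0xbd.
 */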
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		div = 5;
		break;
	case 267:
		div = 6;
		break;
	case 320:
	case 333:
	case 400:
		div = 8;
		break;
	default:
		return -1;
	}

	freq = DIV_ROUND_CLOSEST(dev_priv->rps.cz_freq * val, 2 * div) / 2;

	return freq;
}
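
/*
 * Worked example for chv_gpu_freq(), for illustration: with cz_freq ==
 * 320 (div == 8), freq = DIV_ROUND_CLOSEST(320 * val, 16) / 2 = 10 * val,
 * so opcode 30 corresponds to 300 MHz.
 */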
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		mul = 5;
		break;
	case 267:
		mul = 6;
		break;
	case 320:
	case 333:
	case 400:
		mul = 8;
		break;
	default:
		return -1;
	}

	/* CHV needs even values */
	opcode = DIV_ROUND_CLOSEST(val * 2 * mul, dev_priv->rps.cz_freq) * 2;

	return opcode;
}
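
/*
 * chv_freq_opcode() rounds to the nearest even opcode (hence the trailing
 * "* 2"), since CHV only accepts even values. Inverting the example
 * above: 300 MHz at cz_freq 320 gives
 * DIV_ROUND_CLOSEST(300 * 2 * 8, 320) * 2 = 15 * 2 = 30.
 */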
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}
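
/*
 * Editor's note: intel_pm_setup() is the early, software-only half of PM
 * init. It creates the rps.hw_lock that the pcode helpers above WARN on,
 * and initializes delayed_resume_work to run intel_gen6_powersave_work
 * once that work is scheduled elsewhere; no hardware is touched here.
 */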
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
}