intel_display.c 486 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
71531715417155171561715717158171591716017161171621716317164171651716617167171681716917170171711717217173171741717517176171771717817179171801718117182171831718417185171861718717188171891719017191171921719317194171951719617197171981719917200172011720217203172041720517206172071720817209172101721117212172131721417215172161721717218172191722017221172221722317224172251722617227172281722917230172311723217233172341723517236172371723817239172401724117242172431724417245172461724717248172491725017251172521725317254172551725617257172581725917260172611726217263172641726517266172671726817269172701727117272172731727417275172761727717278172791728017281172821728317284172851728617287172881728917290172911729217293172941729517296172971729817299173001730117302173031730417305173061730717308173091731017311173121731317314173151731617317173181731917320173211732217323173241732517326173271732817329173301733117332173331733417335173361733717338173391734017341173421734317344173451734617347173481734917350173511735217353173541735517356173571735817359173601736117362173631736417365173661736717368173691737017371173721737317374173751737617377173781737917380173811738217383173841738517386173871738817389
  1. /*
  2. * Copyright © 2006-2007 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. */
  26. #include <linux/dmi.h>
  27. #include <linux/module.h>
  28. #include <linux/input.h>
  29. #include <linux/i2c.h>
  30. #include <linux/kernel.h>
  31. #include <linux/slab.h>
  32. #include <linux/vgaarb.h>
  33. #include <drm/drm_edid.h>
  34. #include <drm/drmP.h>
  35. #include "intel_drv.h"
  36. #include "intel_frontbuffer.h"
  37. #include <drm/i915_drm.h>
  38. #include "i915_drv.h"
  39. #include "intel_dsi.h"
  40. #include "i915_trace.h"
  41. #include <drm/drm_atomic.h>
  42. #include <drm/drm_atomic_helper.h>
  43. #include <drm/drm_dp_helper.h>
  44. #include <drm/drm_crtc_helper.h>
  45. #include <drm/drm_plane_helper.h>
  46. #include <drm/drm_rect.h>
  47. #include <linux/dma_remapping.h>
  48. #include <linux/reservation.h>
  49. static bool is_mmio_work(struct intel_flip_work *work)
  50. {
  51. return work->mmio_work.func;
  52. }
  53. /* Primary plane formats for gen <= 3 */
  54. static const uint32_t i8xx_primary_formats[] = {
  55. DRM_FORMAT_C8,
  56. DRM_FORMAT_RGB565,
  57. DRM_FORMAT_XRGB1555,
  58. DRM_FORMAT_XRGB8888,
  59. };
  60. /* Primary plane formats for gen >= 4 */
  61. static const uint32_t i965_primary_formats[] = {
  62. DRM_FORMAT_C8,
  63. DRM_FORMAT_RGB565,
  64. DRM_FORMAT_XRGB8888,
  65. DRM_FORMAT_XBGR8888,
  66. DRM_FORMAT_XRGB2101010,
  67. DRM_FORMAT_XBGR2101010,
  68. };
  69. static const uint32_t skl_primary_formats[] = {
  70. DRM_FORMAT_C8,
  71. DRM_FORMAT_RGB565,
  72. DRM_FORMAT_XRGB8888,
  73. DRM_FORMAT_XBGR8888,
  74. DRM_FORMAT_ARGB8888,
  75. DRM_FORMAT_ABGR8888,
  76. DRM_FORMAT_XRGB2101010,
  77. DRM_FORMAT_XBGR2101010,
  78. DRM_FORMAT_YUYV,
  79. DRM_FORMAT_YVYU,
  80. DRM_FORMAT_UYVY,
  81. DRM_FORMAT_VYUY,
  82. };
  83. /* Cursor formats */
  84. static const uint32_t intel_cursor_formats[] = {
  85. DRM_FORMAT_ARGB8888,
  86. };
  87. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  88. struct intel_crtc_state *pipe_config);
  89. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  90. struct intel_crtc_state *pipe_config);
  91. static int intel_framebuffer_init(struct drm_device *dev,
  92. struct intel_framebuffer *ifb,
  93. struct drm_mode_fb_cmd2 *mode_cmd,
  94. struct drm_i915_gem_object *obj);
  95. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  96. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  97. static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
  98. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  99. struct intel_link_m_n *m_n,
  100. struct intel_link_m_n *m2_n2);
  101. static void ironlake_set_pipeconf(struct drm_crtc *crtc);
  102. static void haswell_set_pipeconf(struct drm_crtc *crtc);
  103. static void haswell_set_pipemisc(struct drm_crtc *crtc);
  104. static void vlv_prepare_pll(struct intel_crtc *crtc,
  105. const struct intel_crtc_state *pipe_config);
  106. static void chv_prepare_pll(struct intel_crtc *crtc,
  107. const struct intel_crtc_state *pipe_config);
  108. static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  109. static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  110. static void skl_init_scalers(struct drm_i915_private *dev_priv,
  111. struct intel_crtc *crtc,
  112. struct intel_crtc_state *crtc_state);
  113. static void skylake_pfit_enable(struct intel_crtc *crtc);
  114. static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
  115. static void ironlake_pfit_enable(struct intel_crtc *crtc);
  116. static void intel_modeset_setup_hw_state(struct drm_device *dev);
  117. static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
  118. static int ilk_max_pixel_rate(struct drm_atomic_state *state);
  119. static int bxt_calc_cdclk(int max_pixclk);
  120. struct intel_limit {
  121. struct {
  122. int min, max;
  123. } dot, vco, n, m, m1, m2, p, p1;
  124. struct {
  125. int dot_limit;
  126. int p2_slow, p2_fast;
  127. } p2;
  128. };
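/*
 * In the limit tables below, p2_slow is used when the requested dot clock is
 * below dot_limit (or for single-link LVDS), and p2_fast is used above
 * dot_limit (or for dual-link LVDS); see i9xx_select_p2_div().
 */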
  129. /* returns HPLL frequency in kHz */
  130. static int valleyview_get_vco(struct drm_i915_private *dev_priv)
  131. {
  132. int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
  133. /* Obtain SKU information */
  134. mutex_lock(&dev_priv->sb_lock);
  135. hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
  136. CCK_FUSE_HPLL_FREQ_MASK;
  137. mutex_unlock(&dev_priv->sb_lock);
  138. return vco_freq[hpll_freq] * 1000;
  139. }
  140. int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
  141. const char *name, u32 reg, int ref_freq)
  142. {
  143. u32 val;
  144. int divider;
  145. mutex_lock(&dev_priv->sb_lock);
  146. val = vlv_cck_read(dev_priv, reg);
  147. mutex_unlock(&dev_priv->sb_lock);
  148. divider = val & CCK_FREQUENCY_VALUES;
  149. WARN((val & CCK_FREQUENCY_STATUS) !=
  150. (divider << CCK_FREQUENCY_STATUS_SHIFT),
  151. "%s change in progress\n", name);
  152. return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
  153. }
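/*
 * Worked example with illustrative numbers (not taken from any particular
 * SKU): for ref_freq = 1600000 kHz and a divider field of 7, the result is
 * DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) = 400000 kHz, i.e. the CCK clock
 * runs at 2 * ref_freq / (divider + 1).
 */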
  154. static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
  155. const char *name, u32 reg)
  156. {
  157. if (dev_priv->hpll_freq == 0)
  158. dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
  159. return vlv_get_cck_clock(dev_priv, name, reg,
  160. dev_priv->hpll_freq);
  161. }
  162. static int
  163. intel_pch_rawclk(struct drm_i915_private *dev_priv)
  164. {
  165. return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
  166. }
  167. static int
  168. intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
  169. {
  170. /* RAWCLK_FREQ_VLV register updated from power well code */
  171. return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
  172. CCK_DISPLAY_REF_CLOCK_CONTROL);
  173. }
  174. static int
  175. intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
  176. {
  177. uint32_t clkcfg;
  178. /* hrawclock is 1/4 the FSB frequency */
  179. clkcfg = I915_READ(CLKCFG);
  180. switch (clkcfg & CLKCFG_FSB_MASK) {
  181. case CLKCFG_FSB_400:
  182. return 100000;
  183. case CLKCFG_FSB_533:
  184. return 133333;
  185. case CLKCFG_FSB_667:
  186. return 166667;
  187. case CLKCFG_FSB_800:
  188. return 200000;
  189. case CLKCFG_FSB_1067:
  190. return 266667;
  191. case CLKCFG_FSB_1333:
  192. return 333333;
  193. /* these two are just a guess; one of them might be right */
  194. case CLKCFG_FSB_1600:
  195. case CLKCFG_FSB_1600_ALT:
  196. return 400000;
  197. default:
  198. return 133333;
  199. }
  200. }
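/*
 * The switch above simply encodes "hrawclk = FSB / 4" in kHz: e.g. the
 * CLKCFG_FSB_533 setting (a ~533 MHz FSB) maps to 133333 kHz.
 */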
  201. void intel_update_rawclk(struct drm_i915_private *dev_priv)
  202. {
  203. if (HAS_PCH_SPLIT(dev_priv))
  204. dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
  205. else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  206. dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
  207. else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
  208. dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
  209. else
  210. return; /* no rawclk on other platforms, or no need to know it */
  211. DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
  212. }
  213. static void intel_update_czclk(struct drm_i915_private *dev_priv)
  214. {
  215. if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
  216. return;
  217. dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
  218. CCK_CZ_CLOCK_CONTROL);
  219. DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
  220. }
  221. static inline u32 /* units of 100MHz */
  222. intel_fdi_link_freq(struct drm_i915_private *dev_priv,
  223. const struct intel_crtc_state *pipe_config)
  224. {
  225. if (HAS_DDI(dev_priv))
  226. return pipe_config->port_clock; /* SPLL */
  227. else if (IS_GEN5(dev_priv))
  228. return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
  229. else
  230. return 270000;
  231. }
  232. static const struct intel_limit intel_limits_i8xx_dac = {
  233. .dot = { .min = 25000, .max = 350000 },
  234. .vco = { .min = 908000, .max = 1512000 },
  235. .n = { .min = 2, .max = 16 },
  236. .m = { .min = 96, .max = 140 },
  237. .m1 = { .min = 18, .max = 26 },
  238. .m2 = { .min = 6, .max = 16 },
  239. .p = { .min = 4, .max = 128 },
  240. .p1 = { .min = 2, .max = 33 },
  241. .p2 = { .dot_limit = 165000,
  242. .p2_slow = 4, .p2_fast = 2 },
  243. };
  244. static const struct intel_limit intel_limits_i8xx_dvo = {
  245. .dot = { .min = 25000, .max = 350000 },
  246. .vco = { .min = 908000, .max = 1512000 },
  247. .n = { .min = 2, .max = 16 },
  248. .m = { .min = 96, .max = 140 },
  249. .m1 = { .min = 18, .max = 26 },
  250. .m2 = { .min = 6, .max = 16 },
  251. .p = { .min = 4, .max = 128 },
  252. .p1 = { .min = 2, .max = 33 },
  253. .p2 = { .dot_limit = 165000,
  254. .p2_slow = 4, .p2_fast = 4 },
  255. };
  256. static const struct intel_limit intel_limits_i8xx_lvds = {
  257. .dot = { .min = 25000, .max = 350000 },
  258. .vco = { .min = 908000, .max = 1512000 },
  259. .n = { .min = 2, .max = 16 },
  260. .m = { .min = 96, .max = 140 },
  261. .m1 = { .min = 18, .max = 26 },
  262. .m2 = { .min = 6, .max = 16 },
  263. .p = { .min = 4, .max = 128 },
  264. .p1 = { .min = 1, .max = 6 },
  265. .p2 = { .dot_limit = 165000,
  266. .p2_slow = 14, .p2_fast = 7 },
  267. };
  268. static const struct intel_limit intel_limits_i9xx_sdvo = {
  269. .dot = { .min = 20000, .max = 400000 },
  270. .vco = { .min = 1400000, .max = 2800000 },
  271. .n = { .min = 1, .max = 6 },
  272. .m = { .min = 70, .max = 120 },
  273. .m1 = { .min = 8, .max = 18 },
  274. .m2 = { .min = 3, .max = 7 },
  275. .p = { .min = 5, .max = 80 },
  276. .p1 = { .min = 1, .max = 8 },
  277. .p2 = { .dot_limit = 200000,
  278. .p2_slow = 10, .p2_fast = 5 },
  279. };
  280. static const struct intel_limit intel_limits_i9xx_lvds = {
  281. .dot = { .min = 20000, .max = 400000 },
  282. .vco = { .min = 1400000, .max = 2800000 },
  283. .n = { .min = 1, .max = 6 },
  284. .m = { .min = 70, .max = 120 },
  285. .m1 = { .min = 8, .max = 18 },
  286. .m2 = { .min = 3, .max = 7 },
  287. .p = { .min = 7, .max = 98 },
  288. .p1 = { .min = 1, .max = 8 },
  289. .p2 = { .dot_limit = 112000,
  290. .p2_slow = 14, .p2_fast = 7 },
  291. };
  292. static const struct intel_limit intel_limits_g4x_sdvo = {
  293. .dot = { .min = 25000, .max = 270000 },
  294. .vco = { .min = 1750000, .max = 3500000},
  295. .n = { .min = 1, .max = 4 },
  296. .m = { .min = 104, .max = 138 },
  297. .m1 = { .min = 17, .max = 23 },
  298. .m2 = { .min = 5, .max = 11 },
  299. .p = { .min = 10, .max = 30 },
  300. .p1 = { .min = 1, .max = 3},
  301. .p2 = { .dot_limit = 270000,
  302. .p2_slow = 10,
  303. .p2_fast = 10
  304. },
  305. };
  306. static const struct intel_limit intel_limits_g4x_hdmi = {
  307. .dot = { .min = 22000, .max = 400000 },
  308. .vco = { .min = 1750000, .max = 3500000},
  309. .n = { .min = 1, .max = 4 },
  310. .m = { .min = 104, .max = 138 },
  311. .m1 = { .min = 16, .max = 23 },
  312. .m2 = { .min = 5, .max = 11 },
  313. .p = { .min = 5, .max = 80 },
  314. .p1 = { .min = 1, .max = 8},
  315. .p2 = { .dot_limit = 165000,
  316. .p2_slow = 10, .p2_fast = 5 },
  317. };
  318. static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
  319. .dot = { .min = 20000, .max = 115000 },
  320. .vco = { .min = 1750000, .max = 3500000 },
  321. .n = { .min = 1, .max = 3 },
  322. .m = { .min = 104, .max = 138 },
  323. .m1 = { .min = 17, .max = 23 },
  324. .m2 = { .min = 5, .max = 11 },
  325. .p = { .min = 28, .max = 112 },
  326. .p1 = { .min = 2, .max = 8 },
  327. .p2 = { .dot_limit = 0,
  328. .p2_slow = 14, .p2_fast = 14
  329. },
  330. };
  331. static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
  332. .dot = { .min = 80000, .max = 224000 },
  333. .vco = { .min = 1750000, .max = 3500000 },
  334. .n = { .min = 1, .max = 3 },
  335. .m = { .min = 104, .max = 138 },
  336. .m1 = { .min = 17, .max = 23 },
  337. .m2 = { .min = 5, .max = 11 },
  338. .p = { .min = 14, .max = 42 },
  339. .p1 = { .min = 2, .max = 6 },
  340. .p2 = { .dot_limit = 0,
  341. .p2_slow = 7, .p2_fast = 7
  342. },
  343. };
  344. static const struct intel_limit intel_limits_pineview_sdvo = {
  345. .dot = { .min = 20000, .max = 400000},
  346. .vco = { .min = 1700000, .max = 3500000 },
  347. /* Pineview's Ncounter is a ring counter */
  348. .n = { .min = 3, .max = 6 },
  349. .m = { .min = 2, .max = 256 },
  350. /* Pineview only has one combined m divider, which we treat as m2. */
  351. .m1 = { .min = 0, .max = 0 },
  352. .m2 = { .min = 0, .max = 254 },
  353. .p = { .min = 5, .max = 80 },
  354. .p1 = { .min = 1, .max = 8 },
  355. .p2 = { .dot_limit = 200000,
  356. .p2_slow = 10, .p2_fast = 5 },
  357. };
  358. static const struct intel_limit intel_limits_pineview_lvds = {
  359. .dot = { .min = 20000, .max = 400000 },
  360. .vco = { .min = 1700000, .max = 3500000 },
  361. .n = { .min = 3, .max = 6 },
  362. .m = { .min = 2, .max = 256 },
  363. .m1 = { .min = 0, .max = 0 },
  364. .m2 = { .min = 0, .max = 254 },
  365. .p = { .min = 7, .max = 112 },
  366. .p1 = { .min = 1, .max = 8 },
  367. .p2 = { .dot_limit = 112000,
  368. .p2_slow = 14, .p2_fast = 14 },
  369. };
  370. /* Ironlake / Sandybridge
  371. *
  372. * We calculate clock using (register_value + 2) for N/M1/M2, so here
  373. * the range value for them is (actual_value - 2).
  374. */
  375. static const struct intel_limit intel_limits_ironlake_dac = {
  376. .dot = { .min = 25000, .max = 350000 },
  377. .vco = { .min = 1760000, .max = 3510000 },
  378. .n = { .min = 1, .max = 5 },
  379. .m = { .min = 79, .max = 127 },
  380. .m1 = { .min = 12, .max = 22 },
  381. .m2 = { .min = 5, .max = 9 },
  382. .p = { .min = 5, .max = 80 },
  383. .p1 = { .min = 1, .max = 8 },
  384. .p2 = { .dot_limit = 225000,
  385. .p2_slow = 10, .p2_fast = 5 },
  386. };
  387. static const struct intel_limit intel_limits_ironlake_single_lvds = {
  388. .dot = { .min = 25000, .max = 350000 },
  389. .vco = { .min = 1760000, .max = 3510000 },
  390. .n = { .min = 1, .max = 3 },
  391. .m = { .min = 79, .max = 118 },
  392. .m1 = { .min = 12, .max = 22 },
  393. .m2 = { .min = 5, .max = 9 },
  394. .p = { .min = 28, .max = 112 },
  395. .p1 = { .min = 2, .max = 8 },
  396. .p2 = { .dot_limit = 225000,
  397. .p2_slow = 14, .p2_fast = 14 },
  398. };
  399. static const struct intel_limit intel_limits_ironlake_dual_lvds = {
  400. .dot = { .min = 25000, .max = 350000 },
  401. .vco = { .min = 1760000, .max = 3510000 },
  402. .n = { .min = 1, .max = 3 },
  403. .m = { .min = 79, .max = 127 },
  404. .m1 = { .min = 12, .max = 22 },
  405. .m2 = { .min = 5, .max = 9 },
  406. .p = { .min = 14, .max = 56 },
  407. .p1 = { .min = 2, .max = 8 },
  408. .p2 = { .dot_limit = 225000,
  409. .p2_slow = 7, .p2_fast = 7 },
  410. };
411. /* LVDS 100MHz refclk limits. */
  412. static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
  413. .dot = { .min = 25000, .max = 350000 },
  414. .vco = { .min = 1760000, .max = 3510000 },
  415. .n = { .min = 1, .max = 2 },
  416. .m = { .min = 79, .max = 126 },
  417. .m1 = { .min = 12, .max = 22 },
  418. .m2 = { .min = 5, .max = 9 },
  419. .p = { .min = 28, .max = 112 },
  420. .p1 = { .min = 2, .max = 8 },
  421. .p2 = { .dot_limit = 225000,
  422. .p2_slow = 14, .p2_fast = 14 },
  423. };
  424. static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
  425. .dot = { .min = 25000, .max = 350000 },
  426. .vco = { .min = 1760000, .max = 3510000 },
  427. .n = { .min = 1, .max = 3 },
  428. .m = { .min = 79, .max = 126 },
  429. .m1 = { .min = 12, .max = 22 },
  430. .m2 = { .min = 5, .max = 9 },
  431. .p = { .min = 14, .max = 42 },
  432. .p1 = { .min = 2, .max = 6 },
  433. .p2 = { .dot_limit = 225000,
  434. .p2_slow = 7, .p2_fast = 7 },
  435. };
  436. static const struct intel_limit intel_limits_vlv = {
  437. /*
  438. * These are the data rate limits (measured in fast clocks)
  439. * since those are the strictest limits we have. The fast
  440. * clock and actual rate limits are more relaxed, so checking
  441. * them would make no difference.
  442. */
  443. .dot = { .min = 25000 * 5, .max = 270000 * 5 },
  444. .vco = { .min = 4000000, .max = 6000000 },
  445. .n = { .min = 1, .max = 7 },
  446. .m1 = { .min = 2, .max = 3 },
  447. .m2 = { .min = 11, .max = 156 },
  448. .p1 = { .min = 2, .max = 3 },
  449. .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
  450. };
  451. static const struct intel_limit intel_limits_chv = {
  452. /*
  453. * These are the data rate limits (measured in fast clocks)
  454. * since those are the strictest limits we have. The fast
  455. * clock and actual rate limits are more relaxed, so checking
  456. * them would make no difference.
  457. */
  458. .dot = { .min = 25000 * 5, .max = 540000 * 5},
  459. .vco = { .min = 4800000, .max = 6480000 },
  460. .n = { .min = 1, .max = 1 },
  461. .m1 = { .min = 2, .max = 2 },
  462. .m2 = { .min = 24 << 22, .max = 175 << 22 },
  463. .p1 = { .min = 2, .max = 4 },
  464. .p2 = { .p2_slow = 1, .p2_fast = 14 },
  465. };
  466. static const struct intel_limit intel_limits_bxt = {
  467. /* FIXME: find real dot limits */
  468. .dot = { .min = 0, .max = INT_MAX },
  469. .vco = { .min = 4800000, .max = 6700000 },
  470. .n = { .min = 1, .max = 1 },
  471. .m1 = { .min = 2, .max = 2 },
  472. /* FIXME: find real m2 limits */
  473. .m2 = { .min = 2 << 22, .max = 255 << 22 },
  474. .p1 = { .min = 2, .max = 4 },
  475. .p2 = { .p2_slow = 1, .p2_fast = 20 },
  476. };
  477. static bool
  478. needs_modeset(struct drm_crtc_state *state)
  479. {
  480. return drm_atomic_crtc_needs_modeset(state);
  481. }
  482. /*
  483. * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  484. * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
  485. * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
  486. * The helpers' return value is the rate of the clock that is fed to the
  487. * display engine's pipe which can be the above fast dot clock rate or a
  488. * divided-down version of it.
  489. */
  490. /* m1 is reserved as 0 in Pineview, n is a ring counter */
  491. static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
  492. {
  493. clock->m = clock->m2 + 2;
  494. clock->p = clock->p1 * clock->p2;
  495. if (WARN_ON(clock->n == 0 || clock->p == 0))
  496. return 0;
  497. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  498. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  499. return clock->dot;
  500. }
  501. static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
  502. {
  503. return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
  504. }
  505. static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
  506. {
  507. clock->m = i9xx_dpll_compute_m(clock);
  508. clock->p = clock->p1 * clock->p2;
  509. if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
  510. return 0;
  511. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
  512. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  513. return clock->dot;
  514. }
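/*
 * Worked example with purely illustrative values that happen to sit inside
 * the i9xx SDVO limits above: refclk = 96000 kHz, m1 = 12, m2 = 5, n = 3,
 * p1 = 2, p2 = 5.  Then m = 5 * (12 + 2) + (5 + 2) = 77,
 * vco = 96000 * 77 / (3 + 2) = 1478400 kHz, p = 2 * 5 = 10 and
 * dot = 1478400 / 10 = 147840 kHz.
 */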
  515. static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
  516. {
  517. clock->m = clock->m1 * clock->m2;
  518. clock->p = clock->p1 * clock->p2;
  519. if (WARN_ON(clock->n == 0 || clock->p == 0))
  520. return 0;
  521. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  522. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  523. return clock->dot / 5;
  524. }
  525. int chv_calc_dpll_params(int refclk, struct dpll *clock)
  526. {
  527. clock->m = clock->m1 * clock->m2;
  528. clock->p = clock->p1 * clock->p2;
  529. if (WARN_ON(clock->n == 0 || clock->p == 0))
  530. return 0;
  531. clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
  532. clock->n << 22);
  533. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  534. return clock->dot / 5;
  535. }
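/*
 * On CHV (and BXT, which reuses this helper) m2 carries a 22-bit fractional
 * part, which is why the m2 limits in intel_limits_chv/intel_limits_bxt are
 * shifted left by 22 and why the VCO computation divides by (n << 22).  The
 * final "/ 5" converts the fast (data rate) clock these helpers operate on
 * back into the dot clock, matching the "target *= 5" in the find functions
 * below.
 */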
  536. #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
  537. /**
538. * Returns whether the given set of divisors is valid for a given refclk with
539. * the given connectors.
  540. */
  541. static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
  542. const struct intel_limit *limit,
  543. const struct dpll *clock)
  544. {
  545. if (clock->n < limit->n.min || limit->n.max < clock->n)
  546. INTELPllInvalid("n out of range\n");
  547. if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
  548. INTELPllInvalid("p1 out of range\n");
  549. if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
  550. INTELPllInvalid("m2 out of range\n");
  551. if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
  552. INTELPllInvalid("m1 out of range\n");
  553. if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
  554. !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
  555. if (clock->m1 <= clock->m2)
  556. INTELPllInvalid("m1 <= m2\n");
  557. if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
  558. !IS_BROXTON(dev_priv)) {
  559. if (clock->p < limit->p.min || limit->p.max < clock->p)
  560. INTELPllInvalid("p out of range\n");
  561. if (clock->m < limit->m.min || limit->m.max < clock->m)
  562. INTELPllInvalid("m out of range\n");
  563. }
  564. if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  565. INTELPllInvalid("vco out of range\n");
  566. /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
  567. * connector, etc., rather than just a single range.
  568. */
  569. if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  570. INTELPllInvalid("dot out of range\n");
  571. return true;
  572. }
  573. static int
  574. i9xx_select_p2_div(const struct intel_limit *limit,
  575. const struct intel_crtc_state *crtc_state,
  576. int target)
  577. {
  578. struct drm_device *dev = crtc_state->base.crtc->dev;
  579. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  580. /*
  581. * For LVDS just rely on its current settings for dual-channel.
  582. * We haven't figured out how to reliably set up different
  583. * single/dual channel state, if we even can.
  584. */
  585. if (intel_is_dual_link_lvds(dev))
  586. return limit->p2.p2_fast;
  587. else
  588. return limit->p2.p2_slow;
  589. } else {
  590. if (target < limit->p2.dot_limit)
  591. return limit->p2.p2_slow;
  592. else
  593. return limit->p2.p2_fast;
  594. }
  595. }
  596. /*
  597. * Returns a set of divisors for the desired target clock with the given
  598. * refclk, or FALSE. The returned values represent the clock equation:
599. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  600. *
  601. * Target and reference clocks are specified in kHz.
  602. *
  603. * If match_clock is provided, then best_clock P divider must match the P
  604. * divider from @match_clock used for LVDS downclocking.
  605. */
  606. static bool
  607. i9xx_find_best_dpll(const struct intel_limit *limit,
  608. struct intel_crtc_state *crtc_state,
  609. int target, int refclk, struct dpll *match_clock,
  610. struct dpll *best_clock)
  611. {
  612. struct drm_device *dev = crtc_state->base.crtc->dev;
  613. struct dpll clock;
  614. int err = target;
  615. memset(best_clock, 0, sizeof(*best_clock));
  616. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  617. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  618. clock.m1++) {
  619. for (clock.m2 = limit->m2.min;
  620. clock.m2 <= limit->m2.max; clock.m2++) {
  621. if (clock.m2 >= clock.m1)
  622. break;
  623. for (clock.n = limit->n.min;
  624. clock.n <= limit->n.max; clock.n++) {
  625. for (clock.p1 = limit->p1.min;
  626. clock.p1 <= limit->p1.max; clock.p1++) {
  627. int this_err;
  628. i9xx_calc_dpll_params(refclk, &clock);
  629. if (!intel_PLL_is_valid(to_i915(dev),
  630. limit,
  631. &clock))
  632. continue;
  633. if (match_clock &&
  634. clock.p != match_clock->p)
  635. continue;
  636. this_err = abs(clock.dot - target);
  637. if (this_err < err) {
  638. *best_clock = clock;
  639. err = this_err;
  640. }
  641. }
  642. }
  643. }
  644. }
  645. return (err != target);
  646. }
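/*
 * Hypothetical usage sketch (for illustration only; real callers elsewhere in
 * this file pick the limit table and refclk per platform and output type):
 *
 *	struct dpll clock;
 *
 *	if (i9xx_find_best_dpll(&intel_limits_i9xx_sdvo, crtc_state,
 *				270000, 96000, NULL, &clock))
 *		// clock.dot now holds the closest achievable dot clock in kHz
 */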
  647. /*
  648. * Returns a set of divisors for the desired target clock with the given
  649. * refclk, or FALSE. The returned values represent the clock equation:
650. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  651. *
  652. * Target and reference clocks are specified in kHz.
  653. *
  654. * If match_clock is provided, then best_clock P divider must match the P
  655. * divider from @match_clock used for LVDS downclocking.
  656. */
  657. static bool
  658. pnv_find_best_dpll(const struct intel_limit *limit,
  659. struct intel_crtc_state *crtc_state,
  660. int target, int refclk, struct dpll *match_clock,
  661. struct dpll *best_clock)
  662. {
  663. struct drm_device *dev = crtc_state->base.crtc->dev;
  664. struct dpll clock;
  665. int err = target;
  666. memset(best_clock, 0, sizeof(*best_clock));
  667. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  668. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  669. clock.m1++) {
  670. for (clock.m2 = limit->m2.min;
  671. clock.m2 <= limit->m2.max; clock.m2++) {
  672. for (clock.n = limit->n.min;
  673. clock.n <= limit->n.max; clock.n++) {
  674. for (clock.p1 = limit->p1.min;
  675. clock.p1 <= limit->p1.max; clock.p1++) {
  676. int this_err;
  677. pnv_calc_dpll_params(refclk, &clock);
  678. if (!intel_PLL_is_valid(to_i915(dev),
  679. limit,
  680. &clock))
  681. continue;
  682. if (match_clock &&
  683. clock.p != match_clock->p)
  684. continue;
  685. this_err = abs(clock.dot - target);
  686. if (this_err < err) {
  687. *best_clock = clock;
  688. err = this_err;
  689. }
  690. }
  691. }
  692. }
  693. }
  694. return (err != target);
  695. }
  696. /*
  697. * Returns a set of divisors for the desired target clock with the given
  698. * refclk, or FALSE. The returned values represent the clock equation:
699. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  700. *
  701. * Target and reference clocks are specified in kHz.
  702. *
  703. * If match_clock is provided, then best_clock P divider must match the P
  704. * divider from @match_clock used for LVDS downclocking.
  705. */
  706. static bool
  707. g4x_find_best_dpll(const struct intel_limit *limit,
  708. struct intel_crtc_state *crtc_state,
  709. int target, int refclk, struct dpll *match_clock,
  710. struct dpll *best_clock)
  711. {
  712. struct drm_device *dev = crtc_state->base.crtc->dev;
  713. struct dpll clock;
  714. int max_n;
  715. bool found = false;
  716. /* approximately equals target * 0.00585 */
  717. int err_most = (target >> 8) + (target >> 9);
  718. memset(best_clock, 0, sizeof(*best_clock));
  719. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  720. max_n = limit->n.max;
721. /* based on hardware requirements, prefer smaller n for better precision */
  722. for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
723. /* based on hardware requirements, prefer larger m1, m2 */
  724. for (clock.m1 = limit->m1.max;
  725. clock.m1 >= limit->m1.min; clock.m1--) {
  726. for (clock.m2 = limit->m2.max;
  727. clock.m2 >= limit->m2.min; clock.m2--) {
  728. for (clock.p1 = limit->p1.max;
  729. clock.p1 >= limit->p1.min; clock.p1--) {
  730. int this_err;
  731. i9xx_calc_dpll_params(refclk, &clock);
  732. if (!intel_PLL_is_valid(to_i915(dev),
  733. limit,
  734. &clock))
  735. continue;
  736. this_err = abs(clock.dot - target);
  737. if (this_err < err_most) {
  738. *best_clock = clock;
  739. err_most = this_err;
  740. max_n = clock.n;
  741. found = true;
  742. }
  743. }
  744. }
  745. }
  746. }
  747. return found;
  748. }
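/*
 * Note that max_n is pulled down to the n of the best match found so far, so
 * later iterations only consider equal or smaller n values, in line with the
 * "prefer smaller n" requirement noted above.
 */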
  749. /*
750. * Check whether the calculated PLL configuration is better than the best
751. * configuration and error found so far. The calculated error is returned via *error_ppm.
  752. */
  753. static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
  754. const struct dpll *calculated_clock,
  755. const struct dpll *best_clock,
  756. unsigned int best_error_ppm,
  757. unsigned int *error_ppm)
  758. {
  759. /*
  760. * For CHV ignore the error and consider only the P value.
  761. * Prefer a bigger P value based on HW requirements.
  762. */
  763. if (IS_CHERRYVIEW(to_i915(dev))) {
  764. *error_ppm = 0;
  765. return calculated_clock->p > best_clock->p;
  766. }
  767. if (WARN_ON_ONCE(!target_freq))
  768. return false;
  769. *error_ppm = div_u64(1000000ULL *
  770. abs(target_freq - calculated_clock->dot),
  771. target_freq);
  772. /*
  773. * Prefer a better P value over a better (smaller) error if the error
  774. * is small. Ensure this preference for future configurations too by
  775. * setting the error to 0.
  776. */
  777. if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
  778. *error_ppm = 0;
  779. return true;
  780. }
  781. return *error_ppm + 10 < best_error_ppm;
  782. }
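/*
 * Example of the ppm computation with illustrative numbers: for a target of
 * 148500 kHz and a calculated dot clock of 148470 kHz the error is
 * 1000000 * 30 / 148500 ~= 202 ppm, which would not displace a previous best
 * of 100 ppm given the "+ 10" margin in the final comparison above.
 */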
  783. /*
  784. * Returns a set of divisors for the desired target clock with the given
  785. * refclk, or FALSE. The returned values represent the clock equation:
786. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  787. */
  788. static bool
  789. vlv_find_best_dpll(const struct intel_limit *limit,
  790. struct intel_crtc_state *crtc_state,
  791. int target, int refclk, struct dpll *match_clock,
  792. struct dpll *best_clock)
  793. {
  794. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  795. struct drm_device *dev = crtc->base.dev;
  796. struct dpll clock;
  797. unsigned int bestppm = 1000000;
  798. /* min update 19.2 MHz */
  799. int max_n = min(limit->n.max, refclk / 19200);
  800. bool found = false;
  801. target *= 5; /* fast clock */
  802. memset(best_clock, 0, sizeof(*best_clock));
803. /* based on hardware requirements, prefer smaller n for better precision */
  804. for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  805. for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  806. for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
  807. clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  808. clock.p = clock.p1 * clock.p2;
  809. /* based on hardware requirement, prefer bigger m1,m2 values */
  810. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
  811. unsigned int ppm;
  812. clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
  813. refclk * clock.m1);
  814. vlv_calc_dpll_params(refclk, &clock);
  815. if (!intel_PLL_is_valid(to_i915(dev),
  816. limit,
  817. &clock))
  818. continue;
  819. if (!vlv_PLL_is_optimal(dev, target,
  820. &clock,
  821. best_clock,
  822. bestppm, &ppm))
  823. continue;
  824. *best_clock = clock;
  825. bestppm = ppm;
  826. found = true;
  827. }
  828. }
  829. }
  830. }
  831. return found;
  832. }
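/*
 * Unlike the i9xx/g4x searches, m2 is not iterated here: for each n/p1/p2/m1
 * combination it is solved directly from the target fast clock as
 * m2 = target * p * n / (refclk * m1), and the resulting divisors are then
 * validated against the limits.
 */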
  833. /*
  834. * Returns a set of divisors for the desired target clock with the given
  835. * refclk, or FALSE. The returned values represent the clock equation:
836. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  837. */
  838. static bool
  839. chv_find_best_dpll(const struct intel_limit *limit,
  840. struct intel_crtc_state *crtc_state,
  841. int target, int refclk, struct dpll *match_clock,
  842. struct dpll *best_clock)
  843. {
  844. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  845. struct drm_device *dev = crtc->base.dev;
  846. unsigned int best_error_ppm;
  847. struct dpll clock;
  848. uint64_t m2;
  849. int found = false;
  850. memset(best_clock, 0, sizeof(*best_clock));
  851. best_error_ppm = 1000000;
  852. /*
853. * Based on the hardware documentation, n is always set to 1 and m1 is
854. * always set to 2. If we ever need to support a 200MHz refclk, this will
855. * have to be revisited, because n may no longer be 1.
  856. */
  857. clock.n = 1, clock.m1 = 2;
  858. target *= 5; /* fast clock */
  859. for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  860. for (clock.p2 = limit->p2.p2_fast;
  861. clock.p2 >= limit->p2.p2_slow;
  862. clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  863. unsigned int error_ppm;
  864. clock.p = clock.p1 * clock.p2;
  865. m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
  866. clock.n) << 22, refclk * clock.m1);
  867. if (m2 > INT_MAX/clock.m1)
  868. continue;
  869. clock.m2 = m2;
  870. chv_calc_dpll_params(refclk, &clock);
  871. if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
  872. continue;
  873. if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
  874. best_error_ppm, &error_ppm))
  875. continue;
  876. *best_clock = clock;
  877. best_error_ppm = error_ppm;
  878. found = true;
  879. }
  880. }
  881. return found;
  882. }
  883. bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
  884. struct dpll *best_clock)
  885. {
  886. int refclk = 100000;
  887. const struct intel_limit *limit = &intel_limits_bxt;
  888. return chv_find_best_dpll(limit, crtc_state,
  889. target_clock, refclk, NULL, best_clock);
  890. }
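/*
 * BXT reuses the CHV divider search above, just with a fixed 100 MHz
 * reference clock and the bxt limit table.
 */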
  891. bool intel_crtc_active(struct intel_crtc *crtc)
  892. {
  893. /* Be paranoid as we can arrive here with only partial
  894. * state retrieved from the hardware during setup.
  895. *
  896. * We can ditch the adjusted_mode.crtc_clock check as soon
  897. * as Haswell has gained clock readout/fastboot support.
  898. *
  899. * We can ditch the crtc->primary->fb check as soon as we can
  900. * properly reconstruct framebuffers.
  901. *
  902. * FIXME: The intel_crtc->active here should be switched to
  903. * crtc->state->active once we have proper CRTC states wired up
  904. * for atomic.
  905. */
  906. return crtc->active && crtc->base.primary->state->fb &&
  907. crtc->config->base.adjusted_mode.crtc_clock;
  908. }
  909. enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
  910. enum pipe pipe)
  911. {
  912. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  913. return crtc->config->cpu_transcoder;
  914. }
  915. static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
  916. {
  917. i915_reg_t reg = PIPEDSL(pipe);
  918. u32 line1, line2;
  919. u32 line_mask;
  920. if (IS_GEN2(dev_priv))
  921. line_mask = DSL_LINEMASK_GEN2;
  922. else
  923. line_mask = DSL_LINEMASK_GEN3;
  924. line1 = I915_READ(reg) & line_mask;
  925. msleep(5);
  926. line2 = I915_READ(reg) & line_mask;
  927. return line1 == line2;
  928. }
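/*
 * The check above samples the pipe's current scanline twice, 5 ms apart; if
 * the masked value has not moved, the pipe is assumed to have stopped
 * scanning out.  A running pipe will almost always have advanced by many
 * lines in that time.
 */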
  929. /*
  930. * intel_wait_for_pipe_off - wait for pipe to turn off
  931. * @crtc: crtc whose pipe to wait for
  932. *
  933. * After disabling a pipe, we can't wait for vblank in the usual way,
  934. * spinning on the vblank interrupt status bit, since we won't actually
  935. * see an interrupt when the pipe is disabled.
  936. *
  937. * On Gen4 and above:
  938. * wait for the pipe register state bit to turn off
  939. *
  940. * Otherwise:
  941. * wait for the display line value to settle (it usually
  942. * ends up stopping at the start of the next frame).
  943. *
  944. */
  945. static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
  946. {
  947. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  948. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  949. enum pipe pipe = crtc->pipe;
  950. if (INTEL_GEN(dev_priv) >= 4) {
  951. i915_reg_t reg = PIPECONF(cpu_transcoder);
  952. /* Wait for the Pipe State to go off */
  953. if (intel_wait_for_register(dev_priv,
  954. reg, I965_PIPECONF_ACTIVE, 0,
  955. 100))
  956. WARN(1, "pipe_off wait timed out\n");
  957. } else {
  958. /* Wait for the display line to settle */
  959. if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
  960. WARN(1, "pipe_off wait timed out\n");
  961. }
  962. }
  963. /* Only for pre-ILK configs */
  964. void assert_pll(struct drm_i915_private *dev_priv,
  965. enum pipe pipe, bool state)
  966. {
  967. u32 val;
  968. bool cur_state;
  969. val = I915_READ(DPLL(pipe));
  970. cur_state = !!(val & DPLL_VCO_ENABLE);
  971. I915_STATE_WARN(cur_state != state,
  972. "PLL state assertion failure (expected %s, current %s)\n",
  973. onoff(state), onoff(cur_state));
  974. }
  975. /* XXX: the dsi pll is shared between MIPI DSI ports */
  976. void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
  977. {
  978. u32 val;
  979. bool cur_state;
  980. mutex_lock(&dev_priv->sb_lock);
  981. val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
  982. mutex_unlock(&dev_priv->sb_lock);
  983. cur_state = val & DSI_PLL_VCO_EN;
  984. I915_STATE_WARN(cur_state != state,
  985. "DSI PLL state assertion failure (expected %s, current %s)\n",
  986. onoff(state), onoff(cur_state));
  987. }
  988. static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  989. enum pipe pipe, bool state)
  990. {
  991. bool cur_state;
  992. enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  993. pipe);
  994. if (HAS_DDI(dev_priv)) {
  995. /* DDI does not have a specific FDI_TX register */
  996. u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
  997. cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
  998. } else {
  999. u32 val = I915_READ(FDI_TX_CTL(pipe));
  1000. cur_state = !!(val & FDI_TX_ENABLE);
  1001. }
  1002. I915_STATE_WARN(cur_state != state,
  1003. "FDI TX state assertion failure (expected %s, current %s)\n",
  1004. onoff(state), onoff(cur_state));
  1005. }
  1006. #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
  1007. #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
  1008. static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  1009. enum pipe pipe, bool state)
  1010. {
  1011. u32 val;
  1012. bool cur_state;
  1013. val = I915_READ(FDI_RX_CTL(pipe));
  1014. cur_state = !!(val & FDI_RX_ENABLE);
  1015. I915_STATE_WARN(cur_state != state,
  1016. "FDI RX state assertion failure (expected %s, current %s)\n",
  1017. onoff(state), onoff(cur_state));
  1018. }
  1019. #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
  1020. #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
  1021. static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
  1022. enum pipe pipe)
  1023. {
  1024. u32 val;
  1025. /* ILK FDI PLL is always enabled */
  1026. if (IS_GEN5(dev_priv))
  1027. return;
  1028. /* On Haswell, DDI ports are responsible for the FDI PLL setup */
  1029. if (HAS_DDI(dev_priv))
  1030. return;
  1031. val = I915_READ(FDI_TX_CTL(pipe));
  1032. I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  1033. }
  1034. void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
  1035. enum pipe pipe, bool state)
  1036. {
  1037. u32 val;
  1038. bool cur_state;
  1039. val = I915_READ(FDI_RX_CTL(pipe));
  1040. cur_state = !!(val & FDI_RX_PLL_ENABLE);
  1041. I915_STATE_WARN(cur_state != state,
  1042. "FDI RX PLL assertion failure (expected %s, current %s)\n",
  1043. onoff(state), onoff(cur_state));
  1044. }
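/*
 * Warn if the panel power sequencer still has the register write lock
 * set for the panel on @pipe.  The DPLL/LVDS enable paths below rely on
 * this check ("PLL is protected by panel") before touching those
 * registers.
 */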
  1045. void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
  1046. {
  1047. i915_reg_t pp_reg;
  1048. u32 val;
  1049. enum pipe panel_pipe = PIPE_A;
  1050. bool locked = true;
  1051. if (WARN_ON(HAS_DDI(dev_priv)))
  1052. return;
  1053. if (HAS_PCH_SPLIT(dev_priv)) {
  1054. u32 port_sel;
  1055. pp_reg = PP_CONTROL(0);
  1056. port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
  1057. if (port_sel == PANEL_PORT_SELECT_LVDS &&
  1058. I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
  1059. panel_pipe = PIPE_B;
  1060. /* XXX: else fix for eDP */
  1061. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  1062. /* presumably write lock depends on pipe, not port select */
  1063. pp_reg = PP_CONTROL(pipe);
  1064. panel_pipe = pipe;
  1065. } else {
  1066. pp_reg = PP_CONTROL(0);
  1067. if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
  1068. panel_pipe = PIPE_B;
  1069. }
  1070. val = I915_READ(pp_reg);
  1071. if (!(val & PANEL_POWER_ON) ||
  1072. ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
  1073. locked = false;
  1074. I915_STATE_WARN(panel_pipe == pipe && locked,
  1075. "panel assertion failure, pipe %c regs locked\n",
  1076. pipe_name(pipe));
  1077. }
  1078. static void assert_cursor(struct drm_i915_private *dev_priv,
  1079. enum pipe pipe, bool state)
  1080. {
  1081. bool cur_state;
  1082. if (IS_845G(dev_priv) || IS_I865G(dev_priv))
  1083. cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
  1084. else
  1085. cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  1086. I915_STATE_WARN(cur_state != state,
  1087. "cursor on pipe %c assertion failure (expected %s, current %s)\n",
  1088. pipe_name(pipe), onoff(state), onoff(cur_state));
  1089. }
  1090. #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
  1091. #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
  1092. void assert_pipe(struct drm_i915_private *dev_priv,
  1093. enum pipe pipe, bool state)
  1094. {
  1095. bool cur_state;
  1096. enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1097. pipe);
  1098. enum intel_display_power_domain power_domain;
1099. /* if we need the pipe quirk it must always be on */
  1100. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1101. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1102. state = true;
  1103. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  1104. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  1105. u32 val = I915_READ(PIPECONF(cpu_transcoder));
  1106. cur_state = !!(val & PIPECONF_ENABLE);
  1107. intel_display_power_put(dev_priv, power_domain);
  1108. } else {
  1109. cur_state = false;
  1110. }
  1111. I915_STATE_WARN(cur_state != state,
  1112. "pipe %c assertion failure (expected %s, current %s)\n",
  1113. pipe_name(pipe), onoff(state), onoff(cur_state));
  1114. }
  1115. static void assert_plane(struct drm_i915_private *dev_priv,
  1116. enum plane plane, bool state)
  1117. {
  1118. u32 val;
  1119. bool cur_state;
  1120. val = I915_READ(DSPCNTR(plane));
  1121. cur_state = !!(val & DISPLAY_PLANE_ENABLE);
  1122. I915_STATE_WARN(cur_state != state,
  1123. "plane %c assertion failure (expected %s, current %s)\n",
  1124. plane_name(plane), onoff(state), onoff(cur_state));
  1125. }
  1126. #define assert_plane_enabled(d, p) assert_plane(d, p, true)
  1127. #define assert_plane_disabled(d, p) assert_plane(d, p, false)
  1128. static void assert_planes_disabled(struct drm_i915_private *dev_priv,
  1129. enum pipe pipe)
  1130. {
  1131. int i;
  1132. /* Primary planes are fixed to pipes on gen4+ */
  1133. if (INTEL_GEN(dev_priv) >= 4) {
  1134. u32 val = I915_READ(DSPCNTR(pipe));
  1135. I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
  1136. "plane %c assertion failure, should be disabled but not\n",
  1137. plane_name(pipe));
  1138. return;
  1139. }
  1140. /* Need to check both planes against the pipe */
  1141. for_each_pipe(dev_priv, i) {
  1142. u32 val = I915_READ(DSPCNTR(i));
  1143. enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
  1144. DISPPLANE_SEL_PIPE_SHIFT;
  1145. I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
  1146. "plane %c assertion failure, should be off on pipe %c but is still active\n",
  1147. plane_name(i), pipe_name(pipe));
  1148. }
  1149. }
  1150. static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
  1151. enum pipe pipe)
  1152. {
  1153. int sprite;
  1154. if (INTEL_GEN(dev_priv) >= 9) {
  1155. for_each_sprite(dev_priv, pipe, sprite) {
  1156. u32 val = I915_READ(PLANE_CTL(pipe, sprite));
  1157. I915_STATE_WARN(val & PLANE_CTL_ENABLE,
  1158. "plane %d assertion failure, should be off on pipe %c but is still active\n",
  1159. sprite, pipe_name(pipe));
  1160. }
  1161. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  1162. for_each_sprite(dev_priv, pipe, sprite) {
  1163. u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
  1164. I915_STATE_WARN(val & SP_ENABLE,
  1165. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1166. sprite_name(pipe, sprite), pipe_name(pipe));
  1167. }
  1168. } else if (INTEL_GEN(dev_priv) >= 7) {
  1169. u32 val = I915_READ(SPRCTL(pipe));
  1170. I915_STATE_WARN(val & SPRITE_ENABLE,
  1171. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1172. plane_name(pipe), pipe_name(pipe));
  1173. } else if (INTEL_GEN(dev_priv) >= 5) {
  1174. u32 val = I915_READ(DVSCNTR(pipe));
  1175. I915_STATE_WARN(val & DVS_ENABLE,
  1176. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1177. plane_name(pipe), pipe_name(pipe));
  1178. }
  1179. }
  1180. static void assert_vblank_disabled(struct drm_crtc *crtc)
  1181. {
  1182. if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
  1183. drm_crtc_vblank_put(crtc);
  1184. }
  1185. void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
  1186. enum pipe pipe)
  1187. {
  1188. u32 val;
  1189. bool enabled;
  1190. val = I915_READ(PCH_TRANSCONF(pipe));
  1191. enabled = !!(val & TRANS_ENABLE);
  1192. I915_STATE_WARN(enabled,
  1193. "transcoder assertion failed, should be off on pipe %c but is still active\n",
  1194. pipe_name(pipe));
  1195. }
  1196. static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
  1197. enum pipe pipe, u32 port_sel, u32 val)
  1198. {
  1199. if ((val & DP_PORT_EN) == 0)
  1200. return false;
  1201. if (HAS_PCH_CPT(dev_priv)) {
  1202. u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
  1203. if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
  1204. return false;
  1205. } else if (IS_CHERRYVIEW(dev_priv)) {
  1206. if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
  1207. return false;
  1208. } else {
  1209. if ((val & DP_PIPE_MASK) != (pipe << 30))
  1210. return false;
  1211. }
  1212. return true;
  1213. }
  1214. static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
  1215. enum pipe pipe, u32 val)
  1216. {
  1217. if ((val & SDVO_ENABLE) == 0)
  1218. return false;
  1219. if (HAS_PCH_CPT(dev_priv)) {
  1220. if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
  1221. return false;
  1222. } else if (IS_CHERRYVIEW(dev_priv)) {
  1223. if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
  1224. return false;
  1225. } else {
  1226. if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
  1227. return false;
  1228. }
  1229. return true;
  1230. }
  1231. static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
  1232. enum pipe pipe, u32 val)
  1233. {
  1234. if ((val & LVDS_PORT_EN) == 0)
  1235. return false;
  1236. if (HAS_PCH_CPT(dev_priv)) {
  1237. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1238. return false;
  1239. } else {
  1240. if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
  1241. return false;
  1242. }
  1243. return true;
  1244. }
  1245. static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
  1246. enum pipe pipe, u32 val)
  1247. {
  1248. if ((val & ADPA_DAC_ENABLE) == 0)
  1249. return false;
  1250. if (HAS_PCH_CPT(dev_priv)) {
  1251. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1252. return false;
  1253. } else {
  1254. if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
  1255. return false;
  1256. }
  1257. return true;
  1258. }
  1259. static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
  1260. enum pipe pipe, i915_reg_t reg,
  1261. u32 port_sel)
  1262. {
  1263. u32 val = I915_READ(reg);
  1264. I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
  1265. "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
  1266. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1267. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
  1268. && (val & DP_PIPEB_SELECT),
  1269. "IBX PCH dp port still using transcoder B\n");
  1270. }
  1271. static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
  1272. enum pipe pipe, i915_reg_t reg)
  1273. {
  1274. u32 val = I915_READ(reg);
  1275. I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
  1276. "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
  1277. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1278. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
  1279. && (val & SDVO_PIPE_B_SELECT),
  1280. "IBX PCH hdmi port still using transcoder B\n");
  1281. }
  1282. static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  1283. enum pipe pipe)
  1284. {
  1285. u32 val;
  1286. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1287. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1288. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1289. val = I915_READ(PCH_ADPA);
  1290. I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
  1291. "PCH VGA enabled on transcoder %c, should be disabled\n",
  1292. pipe_name(pipe));
  1293. val = I915_READ(PCH_LVDS);
  1294. I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
  1295. "PCH LVDS enabled on transcoder %c, should be disabled\n",
  1296. pipe_name(pipe));
  1297. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
  1298. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
  1299. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
  1300. }
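/*
 * Program the VLV DPLL control register from the precomputed hw state
 * and wait (up to 1ms) for the PLL to report lock.
 */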
  1301. static void _vlv_enable_pll(struct intel_crtc *crtc,
  1302. const struct intel_crtc_state *pipe_config)
  1303. {
  1304. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1305. enum pipe pipe = crtc->pipe;
  1306. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1307. POSTING_READ(DPLL(pipe));
  1308. udelay(150);
  1309. if (intel_wait_for_register(dev_priv,
  1310. DPLL(pipe),
  1311. DPLL_LOCK_VLV,
  1312. DPLL_LOCK_VLV,
  1313. 1))
  1314. DRM_ERROR("DPLL %d failed to lock\n", pipe);
  1315. }
  1316. static void vlv_enable_pll(struct intel_crtc *crtc,
  1317. const struct intel_crtc_state *pipe_config)
  1318. {
  1319. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1320. enum pipe pipe = crtc->pipe;
  1321. assert_pipe_disabled(dev_priv, pipe);
  1322. /* PLL is protected by panel, make sure we can write it */
  1323. assert_panel_unlocked(dev_priv, pipe);
  1324. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1325. _vlv_enable_pll(crtc, pipe_config);
  1326. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1327. POSTING_READ(DPLL_MD(pipe));
  1328. }
  1329. static void _chv_enable_pll(struct intel_crtc *crtc,
  1330. const struct intel_crtc_state *pipe_config)
  1331. {
  1332. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1333. enum pipe pipe = crtc->pipe;
  1334. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1335. u32 tmp;
  1336. mutex_lock(&dev_priv->sb_lock);
1337. /* Re-enable the 10bit clock to the display controller */
  1338. tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1339. tmp |= DPIO_DCLKP_EN;
  1340. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
  1341. mutex_unlock(&dev_priv->sb_lock);
  1342. /*
  1343. * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
  1344. */
  1345. udelay(1);
  1346. /* Enable PLL */
  1347. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1348. /* Check PLL is locked */
  1349. if (intel_wait_for_register(dev_priv,
  1350. DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
  1351. 1))
  1352. DRM_ERROR("PLL %d failed to lock\n", pipe);
  1353. }
  1354. static void chv_enable_pll(struct intel_crtc *crtc,
  1355. const struct intel_crtc_state *pipe_config)
  1356. {
  1357. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1358. enum pipe pipe = crtc->pipe;
  1359. assert_pipe_disabled(dev_priv, pipe);
  1360. /* PLL is protected by panel, make sure we can write it */
  1361. assert_panel_unlocked(dev_priv, pipe);
  1362. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1363. _chv_enable_pll(crtc, pipe_config);
  1364. if (pipe != PIPE_A) {
  1365. /*
  1366. * WaPixelRepeatModeFixForC0:chv
  1367. *
  1368. * DPLLCMD is AWOL. Use chicken bits to propagate
  1369. * the value from DPLLBMD to either pipe B or C.
  1370. */
  1371. I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
  1372. I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
  1373. I915_WRITE(CBR4_VLV, 0);
  1374. dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
  1375. /*
  1376. * DPLLB VGA mode also seems to cause problems.
  1377. * We should always have it disabled.
  1378. */
  1379. WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
  1380. } else {
  1381. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1382. POSTING_READ(DPLL_MD(pipe));
  1383. }
  1384. }
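/*
 * Count how many active crtcs are currently driving a DVO output; used
 * by the i830 paths below to decide when the DVO 2x clock needs to be
 * enabled on both PLLs and when it can be dropped again.
 */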
  1385. static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
  1386. {
  1387. struct intel_crtc *crtc;
  1388. int count = 0;
  1389. for_each_intel_crtc(&dev_priv->drm, crtc) {
  1390. count += crtc->base.state->active &&
  1391. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
  1392. }
  1393. return count;
  1394. }
  1395. static void i9xx_enable_pll(struct intel_crtc *crtc)
  1396. {
  1397. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1398. i915_reg_t reg = DPLL(crtc->pipe);
  1399. u32 dpll = crtc->config->dpll_hw_state.dpll;
  1400. assert_pipe_disabled(dev_priv, crtc->pipe);
  1401. /* PLL is protected by panel, make sure we can write it */
  1402. if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
  1403. assert_panel_unlocked(dev_priv, crtc->pipe);
  1404. /* Enable DVO 2x clock on both PLLs if necessary */
  1405. if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
  1406. /*
  1407. * It appears to be important that we don't enable this
  1408. * for the current pipe before otherwise configuring the
  1409. * PLL. No idea how this should be handled if multiple
1410. * DVO outputs are enabled simultaneously.
  1411. */
  1412. dpll |= DPLL_DVO_2X_MODE;
  1413. I915_WRITE(DPLL(!crtc->pipe),
  1414. I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
  1415. }
  1416. /*
  1417. * Apparently we need to have VGA mode enabled prior to changing
  1418. * the P1/P2 dividers. Otherwise the DPLL will keep using the old
  1419. * dividers, even though the register value does change.
  1420. */
  1421. I915_WRITE(reg, 0);
  1422. I915_WRITE(reg, dpll);
  1423. /* Wait for the clocks to stabilize. */
  1424. POSTING_READ(reg);
  1425. udelay(150);
  1426. if (INTEL_GEN(dev_priv) >= 4) {
  1427. I915_WRITE(DPLL_MD(crtc->pipe),
  1428. crtc->config->dpll_hw_state.dpll_md);
  1429. } else {
  1430. /* The pixel multiplier can only be updated once the
  1431. * DPLL is enabled and the clocks are stable.
  1432. *
  1433. * So write it again.
  1434. */
  1435. I915_WRITE(reg, dpll);
  1436. }
  1437. /* We do this three times for luck */
  1438. I915_WRITE(reg, dpll);
  1439. POSTING_READ(reg);
  1440. udelay(150); /* wait for warmup */
  1441. I915_WRITE(reg, dpll);
  1442. POSTING_READ(reg);
  1443. udelay(150); /* wait for warmup */
  1444. I915_WRITE(reg, dpll);
  1445. POSTING_READ(reg);
  1446. udelay(150); /* wait for warmup */
  1447. }
  1448. /**
  1449. * i9xx_disable_pll - disable a PLL
1450. * @crtc: crtc whose PLL to disable
1451. *
1452. * Disable the PLL for the pipe driven by @crtc, making sure the pipe
1453. * is off first.
  1454. *
  1455. * Note! This is for pre-ILK only.
  1456. */
  1457. static void i9xx_disable_pll(struct intel_crtc *crtc)
  1458. {
  1459. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1460. enum pipe pipe = crtc->pipe;
  1461. /* Disable DVO 2x clock on both PLLs if necessary */
  1462. if (IS_I830(dev_priv) &&
  1463. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
  1464. !intel_num_dvo_pipes(dev_priv)) {
  1465. I915_WRITE(DPLL(PIPE_B),
  1466. I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
  1467. I915_WRITE(DPLL(PIPE_A),
  1468. I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
  1469. }
1470. /* Leave the pipe/PLL enabled if the pipe quirk requires them to stay on */
  1471. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1472. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1473. return;
  1474. /* Make sure the pipe isn't still relying on us */
  1475. assert_pipe_disabled(dev_priv, pipe);
  1476. I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
  1477. POSTING_READ(DPLL(pipe));
  1478. }
  1479. static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1480. {
  1481. u32 val;
  1482. /* Make sure the pipe isn't still relying on us */
  1483. assert_pipe_disabled(dev_priv, pipe);
  1484. val = DPLL_INTEGRATED_REF_CLK_VLV |
  1485. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1486. if (pipe != PIPE_A)
  1487. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1488. I915_WRITE(DPLL(pipe), val);
  1489. POSTING_READ(DPLL(pipe));
  1490. }
  1491. static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1492. {
  1493. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1494. u32 val;
  1495. /* Make sure the pipe isn't still relying on us */
  1496. assert_pipe_disabled(dev_priv, pipe);
  1497. val = DPLL_SSC_REF_CLK_CHV |
  1498. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1499. if (pipe != PIPE_A)
  1500. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1501. I915_WRITE(DPLL(pipe), val);
  1502. POSTING_READ(DPLL(pipe));
  1503. mutex_lock(&dev_priv->sb_lock);
  1504. /* Disable 10bit clock to display controller */
  1505. val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1506. val &= ~DPIO_DCLKP_EN;
  1507. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
  1508. mutex_unlock(&dev_priv->sb_lock);
  1509. }
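/*
 * Poll the DPLL/PHY status bits for @dport until they match
 * @expected_mask (shifted as needed for port C), warning if the lanes
 * do not become ready within the timeout.
 */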
  1510. void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  1511. struct intel_digital_port *dport,
  1512. unsigned int expected_mask)
  1513. {
  1514. u32 port_mask;
  1515. i915_reg_t dpll_reg;
  1516. switch (dport->port) {
  1517. case PORT_B:
  1518. port_mask = DPLL_PORTB_READY_MASK;
  1519. dpll_reg = DPLL(0);
  1520. break;
  1521. case PORT_C:
  1522. port_mask = DPLL_PORTC_READY_MASK;
  1523. dpll_reg = DPLL(0);
  1524. expected_mask <<= 4;
  1525. break;
  1526. case PORT_D:
  1527. port_mask = DPLL_PORTD_READY_MASK;
  1528. dpll_reg = DPIO_PHY_STATUS;
  1529. break;
  1530. default:
  1531. BUG();
  1532. }
  1533. if (intel_wait_for_register(dev_priv,
  1534. dpll_reg, port_mask, expected_mask,
  1535. 1000))
  1536. WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
  1537. port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
  1538. }
  1539. static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1540. enum pipe pipe)
  1541. {
  1542. struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
  1543. pipe);
  1544. i915_reg_t reg;
  1545. uint32_t val, pipeconf_val;
  1546. /* Make sure PCH DPLL is enabled */
  1547. assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
  1548. /* FDI must be feeding us bits for PCH ports */
  1549. assert_fdi_tx_enabled(dev_priv, pipe);
  1550. assert_fdi_rx_enabled(dev_priv, pipe);
  1551. if (HAS_PCH_CPT(dev_priv)) {
  1552. /* Workaround: Set the timing override bit before enabling the
  1553. * pch transcoder. */
  1554. reg = TRANS_CHICKEN2(pipe);
  1555. val = I915_READ(reg);
  1556. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1557. I915_WRITE(reg, val);
  1558. }
  1559. reg = PCH_TRANSCONF(pipe);
  1560. val = I915_READ(reg);
  1561. pipeconf_val = I915_READ(PIPECONF(pipe));
  1562. if (HAS_PCH_IBX(dev_priv)) {
  1563. /*
  1564. * Make the BPC in transcoder be consistent with
  1565. * that in pipeconf reg. For HDMI we must use 8bpc
  1566. * here for both 8bpc and 12bpc.
  1567. */
  1568. val &= ~PIPECONF_BPC_MASK;
  1569. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
  1570. val |= PIPECONF_8BPC;
  1571. else
  1572. val |= pipeconf_val & PIPECONF_BPC_MASK;
  1573. }
  1574. val &= ~TRANS_INTERLACE_MASK;
  1575. if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
  1576. if (HAS_PCH_IBX(dev_priv) &&
  1577. intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  1578. val |= TRANS_LEGACY_INTERLACED_ILK;
  1579. else
  1580. val |= TRANS_INTERLACED;
  1581. else
  1582. val |= TRANS_PROGRESSIVE;
  1583. I915_WRITE(reg, val | TRANS_ENABLE);
  1584. if (intel_wait_for_register(dev_priv,
  1585. reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
  1586. 100))
  1587. DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
  1588. }
  1589. static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1590. enum transcoder cpu_transcoder)
  1591. {
  1592. u32 val, pipeconf_val;
  1593. /* FDI must be feeding us bits for PCH ports */
  1594. assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
  1595. assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
  1596. /* Workaround: set timing override bit. */
  1597. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1598. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1599. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1600. val = TRANS_ENABLE;
  1601. pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
  1602. if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
  1603. PIPECONF_INTERLACED_ILK)
  1604. val |= TRANS_INTERLACED;
  1605. else
  1606. val |= TRANS_PROGRESSIVE;
  1607. I915_WRITE(LPT_TRANSCONF, val);
  1608. if (intel_wait_for_register(dev_priv,
  1609. LPT_TRANSCONF,
  1610. TRANS_STATE_ENABLE,
  1611. TRANS_STATE_ENABLE,
  1612. 100))
  1613. DRM_ERROR("Failed to enable PCH transcoder\n");
  1614. }
  1615. static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
  1616. enum pipe pipe)
  1617. {
  1618. i915_reg_t reg;
  1619. uint32_t val;
  1620. /* FDI relies on the transcoder */
  1621. assert_fdi_tx_disabled(dev_priv, pipe);
  1622. assert_fdi_rx_disabled(dev_priv, pipe);
  1623. /* Ports must be off as well */
  1624. assert_pch_ports_disabled(dev_priv, pipe);
  1625. reg = PCH_TRANSCONF(pipe);
  1626. val = I915_READ(reg);
  1627. val &= ~TRANS_ENABLE;
  1628. I915_WRITE(reg, val);
  1629. /* wait for PCH transcoder off, transcoder state */
  1630. if (intel_wait_for_register(dev_priv,
  1631. reg, TRANS_STATE_ENABLE, 0,
  1632. 50))
  1633. DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
  1634. if (HAS_PCH_CPT(dev_priv)) {
  1635. /* Workaround: Clear the timing override chicken bit again. */
  1636. reg = TRANS_CHICKEN2(pipe);
  1637. val = I915_READ(reg);
  1638. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1639. I915_WRITE(reg, val);
  1640. }
  1641. }
  1642. void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  1643. {
  1644. u32 val;
  1645. val = I915_READ(LPT_TRANSCONF);
  1646. val &= ~TRANS_ENABLE;
  1647. I915_WRITE(LPT_TRANSCONF, val);
  1648. /* wait for PCH transcoder off, transcoder state */
  1649. if (intel_wait_for_register(dev_priv,
  1650. LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
  1651. 50))
  1652. DRM_ERROR("Failed to disable PCH transcoder\n");
  1653. /* Workaround: clear timing override bit. */
  1654. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1655. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1656. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1657. }
  1658. enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
  1659. {
  1660. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1661. WARN_ON(!crtc->config->has_pch_encoder);
  1662. if (HAS_PCH_LPT(dev_priv))
  1663. return TRANSCODER_A;
  1664. else
  1665. return (enum transcoder) crtc->pipe;
  1666. }
  1667. /**
  1668. * intel_enable_pipe - enable a pipe, asserting requirements
  1669. * @crtc: crtc responsible for the pipe
  1670. *
  1671. * Enable @crtc's pipe, making sure that various hardware specific requirements
  1672. * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
  1673. */
  1674. static void intel_enable_pipe(struct intel_crtc *crtc)
  1675. {
  1676. struct drm_device *dev = crtc->base.dev;
  1677. struct drm_i915_private *dev_priv = to_i915(dev);
  1678. enum pipe pipe = crtc->pipe;
  1679. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1680. i915_reg_t reg;
  1681. u32 val;
  1682. DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
  1683. assert_planes_disabled(dev_priv, pipe);
  1684. assert_cursor_disabled(dev_priv, pipe);
  1685. assert_sprites_disabled(dev_priv, pipe);
  1686. /*
  1687. * A pipe without a PLL won't actually be able to drive bits from
  1688. * a plane. On ILK+ the pipe PLLs are integrated, so we don't
  1689. * need the check.
  1690. */
  1691. if (HAS_GMCH_DISPLAY(dev_priv)) {
  1692. if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
  1693. assert_dsi_pll_enabled(dev_priv);
  1694. else
  1695. assert_pll_enabled(dev_priv, pipe);
  1696. } else {
  1697. if (crtc->config->has_pch_encoder) {
  1698. /* if driving the PCH, we need FDI enabled */
  1699. assert_fdi_rx_pll_enabled(dev_priv,
  1700. (enum pipe) intel_crtc_pch_transcoder(crtc));
  1701. assert_fdi_tx_pll_enabled(dev_priv,
  1702. (enum pipe) cpu_transcoder);
  1703. }
  1704. /* FIXME: assert CPU port conditions for SNB+ */
  1705. }
  1706. reg = PIPECONF(cpu_transcoder);
  1707. val = I915_READ(reg);
  1708. if (val & PIPECONF_ENABLE) {
  1709. WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1710. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
  1711. return;
  1712. }
  1713. I915_WRITE(reg, val | PIPECONF_ENABLE);
  1714. POSTING_READ(reg);
  1715. /*
  1716. * Until the pipe starts DSL will read as 0, which would cause
1717. * an apparent vblank timestamp jump, which also messes up the
  1718. * frame count when it's derived from the timestamps. So let's
  1719. * wait for the pipe to start properly before we call
  1720. * drm_crtc_vblank_on()
  1721. */
  1722. if (dev->max_vblank_count == 0 &&
  1723. wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
  1724. DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
  1725. }
  1726. /**
  1727. * intel_disable_pipe - disable a pipe, asserting requirements
1728. * @crtc: crtc whose pipe is to be disabled
  1729. *
  1730. * Disable the pipe of @crtc, making sure that various hardware
  1731. * specific requirements are met, if applicable, e.g. plane
  1732. * disabled, panel fitter off, etc.
  1733. *
  1734. * Will wait until the pipe has shut down before returning.
  1735. */
  1736. static void intel_disable_pipe(struct intel_crtc *crtc)
  1737. {
  1738. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1739. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1740. enum pipe pipe = crtc->pipe;
  1741. i915_reg_t reg;
  1742. u32 val;
  1743. DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
  1744. /*
  1745. * Make sure planes won't keep trying to pump pixels to us,
  1746. * or we might hang the display.
  1747. */
  1748. assert_planes_disabled(dev_priv, pipe);
  1749. assert_cursor_disabled(dev_priv, pipe);
  1750. assert_sprites_disabled(dev_priv, pipe);
  1751. reg = PIPECONF(cpu_transcoder);
  1752. val = I915_READ(reg);
  1753. if ((val & PIPECONF_ENABLE) == 0)
  1754. return;
  1755. /*
  1756. * Double wide has implications for planes
  1757. * so best keep it disabled when not needed.
  1758. */
  1759. if (crtc->config->double_wide)
  1760. val &= ~PIPECONF_DOUBLE_WIDE;
1761. /* Leave the pipe enabled if the pipe quirk requires it to stay on */
  1762. if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
  1763. !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1764. val &= ~PIPECONF_ENABLE;
  1765. I915_WRITE(reg, val);
  1766. if ((val & PIPECONF_ENABLE) == 0)
  1767. intel_wait_for_pipe_off(crtc);
  1768. }
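/* Tile size in bytes: 2KiB tiles on gen2, 4KiB on everything newer. */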
  1769. static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
  1770. {
  1771. return IS_GEN2(dev_priv) ? 2048 : 4096;
  1772. }
  1773. static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
  1774. uint64_t fb_modifier, unsigned int cpp)
  1775. {
  1776. switch (fb_modifier) {
  1777. case DRM_FORMAT_MOD_NONE:
  1778. return cpp;
  1779. case I915_FORMAT_MOD_X_TILED:
  1780. if (IS_GEN2(dev_priv))
  1781. return 128;
  1782. else
  1783. return 512;
  1784. case I915_FORMAT_MOD_Y_TILED:
  1785. if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
  1786. return 128;
  1787. else
  1788. return 512;
  1789. case I915_FORMAT_MOD_Yf_TILED:
  1790. switch (cpp) {
  1791. case 1:
  1792. return 64;
  1793. case 2:
  1794. case 4:
  1795. return 128;
  1796. case 8:
  1797. case 16:
  1798. return 256;
  1799. default:
  1800. MISSING_CASE(cpp);
  1801. return cpp;
  1802. }
  1803. break;
  1804. default:
  1805. MISSING_CASE(fb_modifier);
  1806. return cpp;
  1807. }
  1808. }
  1809. unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
  1810. uint64_t fb_modifier, unsigned int cpp)
  1811. {
  1812. if (fb_modifier == DRM_FORMAT_MOD_NONE)
  1813. return 1;
  1814. else
  1815. return intel_tile_size(dev_priv) /
  1816. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1817. }
  1818. /* Return the tile dimensions in pixel units */
  1819. static void intel_tile_dims(const struct drm_i915_private *dev_priv,
  1820. unsigned int *tile_width,
  1821. unsigned int *tile_height,
  1822. uint64_t fb_modifier,
  1823. unsigned int cpp)
  1824. {
  1825. unsigned int tile_width_bytes =
  1826. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1827. *tile_width = tile_width_bytes / cpp;
  1828. *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
  1829. }
  1830. unsigned int
  1831. intel_fb_align_height(struct drm_device *dev, unsigned int height,
  1832. uint32_t pixel_format, uint64_t fb_modifier)
  1833. {
  1834. unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
  1835. unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
  1836. return ALIGN(height, tile_height);
  1837. }
  1838. unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
  1839. {
  1840. unsigned int size = 0;
  1841. int i;
  1842. for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
  1843. size += rot_info->plane[i].width * rot_info->plane[i].height;
  1844. return size;
  1845. }
  1846. static void
  1847. intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
  1848. const struct drm_framebuffer *fb,
  1849. unsigned int rotation)
  1850. {
  1851. if (drm_rotation_90_or_270(rotation)) {
  1852. *view = i915_ggtt_view_rotated;
  1853. view->params.rotated = to_intel_framebuffer(fb)->rot_info;
  1854. } else {
  1855. *view = i915_ggtt_view_normal;
  1856. }
  1857. }
  1858. static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
  1859. {
  1860. if (INTEL_INFO(dev_priv)->gen >= 9)
  1861. return 256 * 1024;
  1862. else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
  1863. IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  1864. return 128 * 1024;
  1865. else if (INTEL_INFO(dev_priv)->gen >= 4)
  1866. return 4 * 1024;
  1867. else
  1868. return 0;
  1869. }
  1870. static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
  1871. uint64_t fb_modifier)
  1872. {
  1873. switch (fb_modifier) {
  1874. case DRM_FORMAT_MOD_NONE:
  1875. return intel_linear_alignment(dev_priv);
  1876. case I915_FORMAT_MOD_X_TILED:
  1877. if (INTEL_INFO(dev_priv)->gen >= 9)
  1878. return 256 * 1024;
  1879. return 0;
  1880. case I915_FORMAT_MOD_Y_TILED:
  1881. case I915_FORMAT_MOD_Yf_TILED:
  1882. return 1 * 1024 * 1024;
  1883. default:
  1884. MISSING_CASE(fb_modifier);
  1885. return 0;
  1886. }
  1887. }
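/*
 * Pin the framebuffer's backing object into the global GTT using the
 * view that matches @rotation, bumping the alignment for the VT-d
 * workaround if required, and opportunistically install a fence for
 * tiled scanout.  Caller must hold struct_mutex.
 */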
  1888. struct i915_vma *
  1889. intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  1890. {
  1891. struct drm_device *dev = fb->dev;
  1892. struct drm_i915_private *dev_priv = to_i915(dev);
  1893. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1894. struct i915_ggtt_view view;
  1895. struct i915_vma *vma;
  1896. u32 alignment;
  1897. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  1898. alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
  1899. intel_fill_fb_ggtt_view(&view, fb, rotation);
  1900. /* Note that the w/a also requires 64 PTE of padding following the
  1901. * bo. We currently fill all unused PTE with the shadow page and so
  1902. * we should always have valid PTE following the scanout preventing
  1903. * the VT-d warning.
  1904. */
  1905. if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
  1906. alignment = 256 * 1024;
  1907. /*
  1908. * Global gtt pte registers are special registers which actually forward
1909. * writes to a chunk of system memory, which means that there is no risk
  1910. * that the register values disappear as soon as we call
  1911. * intel_runtime_pm_put(), so it is correct to wrap only the
  1912. * pin/unpin/fence and not more.
  1913. */
  1914. intel_runtime_pm_get(dev_priv);
  1915. vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
  1916. if (IS_ERR(vma))
  1917. goto err;
  1918. if (i915_vma_is_map_and_fenceable(vma)) {
  1919. /* Install a fence for tiled scan-out. Pre-i965 always needs a
  1920. * fence, whereas 965+ only requires a fence if using
  1921. * framebuffer compression. For simplicity, we always, when
  1922. * possible, install a fence as the cost is not that onerous.
  1923. *
  1924. * If we fail to fence the tiled scanout, then either the
  1925. * modeset will reject the change (which is highly unlikely as
  1926. * the affected systems, all but one, do not have unmappable
  1927. * space) or we will not be able to enable full powersaving
  1928. * techniques (also likely not to apply due to various limits
  1929. * FBC and the like impose on the size of the buffer, which
  1930. * presumably we violated anyway with this unmappable buffer).
  1931. * Anyway, it is presumably better to stumble onwards with
  1932. * something and try to run the system in a "less than optimal"
  1933. * mode that matches the user configuration.
  1934. */
  1935. if (i915_vma_get_fence(vma) == 0)
  1936. i915_vma_pin_fence(vma);
  1937. }
  1938. err:
  1939. intel_runtime_pm_put(dev_priv);
  1940. return vma;
  1941. }
  1942. void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  1943. {
  1944. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1945. struct i915_ggtt_view view;
  1946. struct i915_vma *vma;
  1947. WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
  1948. intel_fill_fb_ggtt_view(&view, fb, rotation);
  1949. vma = i915_gem_object_to_ggtt(obj, &view);
  1950. i915_vma_unpin_fence(vma);
  1951. i915_gem_object_unpin_from_display_plane(vma);
  1952. }
  1953. static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
  1954. unsigned int rotation)
  1955. {
  1956. if (drm_rotation_90_or_270(rotation))
  1957. return to_intel_framebuffer(fb)->rotated[plane].pitch;
  1958. else
  1959. return fb->pitches[plane];
  1960. }
  1961. /*
  1962. * Convert the x/y offsets into a linear offset.
  1963. * Only valid with 0/180 degree rotation, which is fine since linear
  1964. * offset is only used with linear buffers on pre-hsw and tiled buffers
1965. * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
  1966. */
  1967. u32 intel_fb_xy_to_linear(int x, int y,
  1968. const struct intel_plane_state *state,
  1969. int plane)
  1970. {
  1971. const struct drm_framebuffer *fb = state->base.fb;
  1972. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  1973. unsigned int pitch = fb->pitches[plane];
  1974. return y * pitch + x * cpp;
  1975. }
  1976. /*
  1977. * Add the x/y offsets derived from fb->offsets[] to the user
  1978. * specified plane src x/y offsets. The resulting x/y offsets
  1979. * specify the start of scanout from the beginning of the gtt mapping.
  1980. */
  1981. void intel_add_fb_offsets(int *x, int *y,
  1982. const struct intel_plane_state *state,
  1983. int plane)
  1984. {
  1985. const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
  1986. unsigned int rotation = state->base.rotation;
  1987. if (drm_rotation_90_or_270(rotation)) {
  1988. *x += intel_fb->rotated[plane].x;
  1989. *y += intel_fb->rotated[plane].y;
  1990. } else {
  1991. *x += intel_fb->normal[plane].x;
  1992. *y += intel_fb->normal[plane].y;
  1993. }
  1994. }
  1995. /*
  1996. * Input tile dimensions and pitch must already be
  1997. * rotated to match x and y, and in pixel units.
  1998. */
  1999. static u32 _intel_adjust_tile_offset(int *x, int *y,
  2000. unsigned int tile_width,
  2001. unsigned int tile_height,
  2002. unsigned int tile_size,
  2003. unsigned int pitch_tiles,
  2004. u32 old_offset,
  2005. u32 new_offset)
  2006. {
  2007. unsigned int pitch_pixels = pitch_tiles * tile_width;
  2008. unsigned int tiles;
  2009. WARN_ON(old_offset & (tile_size - 1));
  2010. WARN_ON(new_offset & (tile_size - 1));
  2011. WARN_ON(new_offset > old_offset);
  2012. tiles = (old_offset - new_offset) / tile_size;
  2013. *y += tiles / pitch_tiles * tile_height;
  2014. *x += tiles % pitch_tiles * tile_width;
  2015. /* minimize x in case it got needlessly big */
  2016. *y += *x / pitch_pixels * tile_height;
  2017. *x %= pitch_pixels;
  2018. return new_offset;
  2019. }
  2020. /*
  2021. * Adjust the tile offset by moving the difference into
  2022. * the x/y offsets.
  2023. */
  2024. static u32 intel_adjust_tile_offset(int *x, int *y,
  2025. const struct intel_plane_state *state, int plane,
  2026. u32 old_offset, u32 new_offset)
  2027. {
  2028. const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
  2029. const struct drm_framebuffer *fb = state->base.fb;
  2030. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2031. unsigned int rotation = state->base.rotation;
  2032. unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
  2033. WARN_ON(new_offset > old_offset);
  2034. if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
  2035. unsigned int tile_size, tile_width, tile_height;
  2036. unsigned int pitch_tiles;
  2037. tile_size = intel_tile_size(dev_priv);
  2038. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2039. fb->modifier[plane], cpp);
  2040. if (drm_rotation_90_or_270(rotation)) {
  2041. pitch_tiles = pitch / tile_height;
  2042. swap(tile_width, tile_height);
  2043. } else {
  2044. pitch_tiles = pitch / (tile_width * cpp);
  2045. }
  2046. _intel_adjust_tile_offset(x, y, tile_width, tile_height,
  2047. tile_size, pitch_tiles,
  2048. old_offset, new_offset);
  2049. } else {
  2050. old_offset += *y * pitch + *x * cpp;
  2051. *y = (old_offset - new_offset) / pitch;
  2052. *x = ((old_offset - new_offset) - *y * pitch) / cpp;
  2053. }
  2054. return new_offset;
  2055. }
  2056. /*
  2057. * Computes the linear offset to the base tile and adjusts
  2058. * x, y. bytes per pixel is assumed to be a power-of-two.
  2059. *
  2060. * In the 90/270 rotated case, x and y are assumed
  2061. * to be already rotated to match the rotated GTT view, and
  2062. * pitch is the tile_height aligned framebuffer height.
  2063. *
  2064. * This function is used when computing the derived information
  2065. * under intel_framebuffer, so using any of that information
  2066. * here is not allowed. Anything under drm_framebuffer can be
  2067. * used. This is why the user has to pass in the pitch since it
  2068. * is specified in the rotated orientation.
  2069. */
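/*
 * Worked example (illustrative numbers only): gen4+ X-tiling uses
 * 512 byte x 8 row (4096 byte) tiles, so with cpp=4 a tile covers
 * 128x8 pixels.  For x=300, y=20, pitch=8192 bytes:
 *
 *   tile_rows   = 20 / 8     = 2   (y becomes 4)
 *   tiles       = 300 / 128  = 2   (x becomes 44)
 *   pitch_tiles = 8192 / 512 = 16
 *   offset      = (2 * 16 + 2) * 4096 = 0x22000
 *
 * With a 256KiB alignment requirement offset_aligned is 0, and
 * _intel_adjust_tile_offset() folds the 0x22000 difference back into
 * the coordinates, yielding x=300, y=20 relative to offset 0.
 */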
  2070. static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
  2071. int *x, int *y,
  2072. const struct drm_framebuffer *fb, int plane,
  2073. unsigned int pitch,
  2074. unsigned int rotation,
  2075. u32 alignment)
  2076. {
  2077. uint64_t fb_modifier = fb->modifier[plane];
  2078. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2079. u32 offset, offset_aligned;
  2080. if (alignment)
  2081. alignment--;
  2082. if (fb_modifier != DRM_FORMAT_MOD_NONE) {
  2083. unsigned int tile_size, tile_width, tile_height;
  2084. unsigned int tile_rows, tiles, pitch_tiles;
  2085. tile_size = intel_tile_size(dev_priv);
  2086. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2087. fb_modifier, cpp);
  2088. if (drm_rotation_90_or_270(rotation)) {
  2089. pitch_tiles = pitch / tile_height;
  2090. swap(tile_width, tile_height);
  2091. } else {
  2092. pitch_tiles = pitch / (tile_width * cpp);
  2093. }
  2094. tile_rows = *y / tile_height;
  2095. *y %= tile_height;
  2096. tiles = *x / tile_width;
  2097. *x %= tile_width;
  2098. offset = (tile_rows * pitch_tiles + tiles) * tile_size;
  2099. offset_aligned = offset & ~alignment;
  2100. _intel_adjust_tile_offset(x, y, tile_width, tile_height,
  2101. tile_size, pitch_tiles,
  2102. offset, offset_aligned);
  2103. } else {
  2104. offset = *y * pitch + *x * cpp;
  2105. offset_aligned = offset & ~alignment;
  2106. *y = (offset & alignment) / pitch;
  2107. *x = ((offset & alignment) - *y * pitch) / cpp;
  2108. }
  2109. return offset_aligned;
  2110. }
  2111. u32 intel_compute_tile_offset(int *x, int *y,
  2112. const struct intel_plane_state *state,
  2113. int plane)
  2114. {
  2115. const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
  2116. const struct drm_framebuffer *fb = state->base.fb;
  2117. unsigned int rotation = state->base.rotation;
  2118. int pitch = intel_fb_pitch(fb, plane, rotation);
  2119. u32 alignment;
  2120. /* AUX_DIST needs only 4K alignment */
  2121. if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
  2122. alignment = 4096;
  2123. else
  2124. alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
  2125. return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
  2126. rotation, alignment);
  2127. }
  2128. /* Convert the fb->offset[] linear offset into x/y offsets */
  2129. static void intel_fb_offset_to_xy(int *x, int *y,
  2130. const struct drm_framebuffer *fb, int plane)
  2131. {
  2132. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2133. unsigned int pitch = fb->pitches[plane];
  2134. u32 linear_offset = fb->offsets[plane];
  2135. *y = linear_offset / pitch;
  2136. *x = linear_offset % pitch / cpp;
  2137. }
  2138. static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
  2139. {
  2140. switch (fb_modifier) {
  2141. case I915_FORMAT_MOD_X_TILED:
  2142. return I915_TILING_X;
  2143. case I915_FORMAT_MOD_Y_TILED:
  2144. return I915_TILING_Y;
  2145. default:
  2146. return I915_TILING_NONE;
  2147. }
  2148. }
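/*
 * Precompute the per-plane layout for a framebuffer: the x/y of the
 * first pixel in both the normal and the 90/270 rotated GTT views, the
 * rotation_info used to build the rotated VMA, and a check that the
 * whole layout fits inside the backing object.
 */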
  2149. static int
  2150. intel_fill_fb_info(struct drm_i915_private *dev_priv,
  2151. struct drm_framebuffer *fb)
  2152. {
  2153. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  2154. struct intel_rotation_info *rot_info = &intel_fb->rot_info;
  2155. u32 gtt_offset_rotated = 0;
  2156. unsigned int max_size = 0;
  2157. uint32_t format = fb->pixel_format;
  2158. int i, num_planes = drm_format_num_planes(format);
  2159. unsigned int tile_size = intel_tile_size(dev_priv);
  2160. for (i = 0; i < num_planes; i++) {
  2161. unsigned int width, height;
  2162. unsigned int cpp, size;
  2163. u32 offset;
  2164. int x, y;
  2165. cpp = drm_format_plane_cpp(format, i);
  2166. width = drm_format_plane_width(fb->width, format, i);
  2167. height = drm_format_plane_height(fb->height, format, i);
  2168. intel_fb_offset_to_xy(&x, &y, fb, i);
  2169. /*
  2170. * The fence (if used) is aligned to the start of the object
  2171. * so having the framebuffer wrap around across the edge of the
  2172. * fenced region doesn't really work. We have no API to configure
  2173. * the fence start offset within the object (nor could we probably
2174. * on gen2/3). So it's easier to just require that the
  2175. * fb layout agrees with the fence layout. We already check that the
  2176. * fb stride matches the fence stride elsewhere.
  2177. */
  2178. if (i915_gem_object_is_tiled(intel_fb->obj) &&
  2179. (x + width) * cpp > fb->pitches[i]) {
  2180. DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
  2181. i, fb->offsets[i]);
  2182. return -EINVAL;
  2183. }
  2184. /*
  2185. * First pixel of the framebuffer from
  2186. * the start of the normal gtt mapping.
  2187. */
  2188. intel_fb->normal[i].x = x;
  2189. intel_fb->normal[i].y = y;
  2190. offset = _intel_compute_tile_offset(dev_priv, &x, &y,
  2191. fb, 0, fb->pitches[i],
  2192. DRM_ROTATE_0, tile_size);
  2193. offset /= tile_size;
  2194. if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
  2195. unsigned int tile_width, tile_height;
  2196. unsigned int pitch_tiles;
  2197. struct drm_rect r;
  2198. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2199. fb->modifier[i], cpp);
  2200. rot_info->plane[i].offset = offset;
  2201. rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
  2202. rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
  2203. rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
  2204. intel_fb->rotated[i].pitch =
  2205. rot_info->plane[i].height * tile_height;
  2206. /* how many tiles does this plane need */
  2207. size = rot_info->plane[i].stride * rot_info->plane[i].height;
  2208. /*
  2209. * If the plane isn't horizontally tile aligned,
  2210. * we need one more tile.
  2211. */
  2212. if (x != 0)
  2213. size++;
  2214. /* rotate the x/y offsets to match the GTT view */
  2215. r.x1 = x;
  2216. r.y1 = y;
  2217. r.x2 = x + width;
  2218. r.y2 = y + height;
  2219. drm_rect_rotate(&r,
  2220. rot_info->plane[i].width * tile_width,
  2221. rot_info->plane[i].height * tile_height,
  2222. DRM_ROTATE_270);
  2223. x = r.x1;
  2224. y = r.y1;
  2225. /* rotate the tile dimensions to match the GTT view */
  2226. pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
  2227. swap(tile_width, tile_height);
  2228. /*
  2229. * We only keep the x/y offsets, so push all of the
  2230. * gtt offset into the x/y offsets.
  2231. */
2232. _intel_adjust_tile_offset(&x, &y, tile_width, tile_height,
2233. tile_size, pitch_tiles,
  2234. gtt_offset_rotated * tile_size, 0);
  2235. gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
  2236. /*
  2237. * First pixel of the framebuffer from
  2238. * the start of the rotated gtt mapping.
  2239. */
  2240. intel_fb->rotated[i].x = x;
  2241. intel_fb->rotated[i].y = y;
  2242. } else {
  2243. size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
  2244. x * cpp, tile_size);
  2245. }
  2246. /* how many tiles in total needed in the bo */
  2247. max_size = max(max_size, offset + size);
  2248. }
  2249. if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
  2250. DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
  2251. max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
  2252. return -EINVAL;
  2253. }
  2254. return 0;
  2255. }
  2256. static int i9xx_format_to_fourcc(int format)
  2257. {
  2258. switch (format) {
  2259. case DISPPLANE_8BPP:
  2260. return DRM_FORMAT_C8;
  2261. case DISPPLANE_BGRX555:
  2262. return DRM_FORMAT_XRGB1555;
  2263. case DISPPLANE_BGRX565:
  2264. return DRM_FORMAT_RGB565;
  2265. default:
  2266. case DISPPLANE_BGRX888:
  2267. return DRM_FORMAT_XRGB8888;
  2268. case DISPPLANE_RGBX888:
  2269. return DRM_FORMAT_XBGR8888;
  2270. case DISPPLANE_BGRX101010:
  2271. return DRM_FORMAT_XRGB2101010;
  2272. case DISPPLANE_RGBX101010:
  2273. return DRM_FORMAT_XBGR2101010;
  2274. }
  2275. }
  2276. static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
  2277. {
  2278. switch (format) {
  2279. case PLANE_CTL_FORMAT_RGB_565:
  2280. return DRM_FORMAT_RGB565;
  2281. default:
  2282. case PLANE_CTL_FORMAT_XRGB_8888:
  2283. if (rgb_order) {
  2284. if (alpha)
  2285. return DRM_FORMAT_ABGR8888;
  2286. else
  2287. return DRM_FORMAT_XBGR8888;
  2288. } else {
  2289. if (alpha)
  2290. return DRM_FORMAT_ARGB8888;
  2291. else
  2292. return DRM_FORMAT_XRGB8888;
  2293. }
  2294. case PLANE_CTL_FORMAT_XRGB_2101010:
  2295. if (rgb_order)
  2296. return DRM_FORMAT_XBGR2101010;
  2297. else
  2298. return DRM_FORMAT_XRGB2101010;
  2299. }
  2300. }
  2301. static bool
  2302. intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  2303. struct intel_initial_plane_config *plane_config)
  2304. {
  2305. struct drm_device *dev = crtc->base.dev;
  2306. struct drm_i915_private *dev_priv = to_i915(dev);
  2307. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  2308. struct drm_i915_gem_object *obj = NULL;
  2309. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  2310. struct drm_framebuffer *fb = &plane_config->fb->base;
  2311. u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
  2312. u32 size_aligned = round_up(plane_config->base + plane_config->size,
  2313. PAGE_SIZE);
  2314. size_aligned -= base_aligned;
  2315. if (plane_config->size == 0)
  2316. return false;
  2317. /* If the FB is too big, just don't use it since fbdev is not very
  2318. * important and we should probably use that space with FBC or other
  2319. * features. */
  2320. if (size_aligned * 2 > ggtt->stolen_usable_size)
  2321. return false;
  2322. mutex_lock(&dev->struct_mutex);
  2323. obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
  2324. base_aligned,
  2325. base_aligned,
  2326. size_aligned);
  2327. if (!obj) {
  2328. mutex_unlock(&dev->struct_mutex);
  2329. return false;
  2330. }
  2331. if (plane_config->tiling == I915_TILING_X)
  2332. obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
  2333. mode_cmd.pixel_format = fb->pixel_format;
  2334. mode_cmd.width = fb->width;
  2335. mode_cmd.height = fb->height;
  2336. mode_cmd.pitches[0] = fb->pitches[0];
  2337. mode_cmd.modifier[0] = fb->modifier[0];
  2338. mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
  2339. if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
  2340. &mode_cmd, obj)) {
  2341. DRM_DEBUG_KMS("intel fb init failed\n");
  2342. goto out_unref_obj;
  2343. }
  2344. mutex_unlock(&dev->struct_mutex);
  2345. DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
  2346. return true;
  2347. out_unref_obj:
  2348. i915_gem_object_put(obj);
  2349. mutex_unlock(&dev->struct_mutex);
  2350. return false;
  2351. }
  2352. /* Update plane->state->fb to match plane->fb after driver-internal updates */
  2353. static void
  2354. update_state_fb(struct drm_plane *plane)
  2355. {
  2356. if (plane->fb == plane->state->fb)
  2357. return;
  2358. if (plane->state->fb)
  2359. drm_framebuffer_unreference(plane->state->fb);
  2360. plane->state->fb = plane->fb;
  2361. if (plane->state->fb)
  2362. drm_framebuffer_reference(plane->state->fb);
  2363. }
  2364. static void
  2365. intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
  2366. struct intel_initial_plane_config *plane_config)
  2367. {
  2368. struct drm_device *dev = intel_crtc->base.dev;
  2369. struct drm_i915_private *dev_priv = to_i915(dev);
  2370. struct drm_crtc *c;
  2371. struct intel_crtc *i;
  2372. struct drm_i915_gem_object *obj;
  2373. struct drm_plane *primary = intel_crtc->base.primary;
  2374. struct drm_plane_state *plane_state = primary->state;
  2375. struct drm_crtc_state *crtc_state = intel_crtc->base.state;
  2376. struct intel_plane *intel_plane = to_intel_plane(primary);
  2377. struct intel_plane_state *intel_state =
  2378. to_intel_plane_state(plane_state);
  2379. struct drm_framebuffer *fb;
  2380. if (!plane_config->fb)
  2381. return;
  2382. if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
  2383. fb = &plane_config->fb->base;
  2384. goto valid_fb;
  2385. }
  2386. kfree(plane_config->fb);
  2387. /*
  2388. * Failed to alloc the obj, check to see if we should share
  2389. * an fb with another CRTC instead
  2390. */
  2391. for_each_crtc(dev, c) {
  2392. i = to_intel_crtc(c);
  2393. if (c == &intel_crtc->base)
  2394. continue;
  2395. if (!i->active)
  2396. continue;
  2397. fb = c->primary->fb;
  2398. if (!fb)
  2399. continue;
  2400. obj = intel_fb_obj(fb);
  2401. if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
  2402. drm_framebuffer_reference(fb);
  2403. goto valid_fb;
  2404. }
  2405. }
  2406. /*
  2407. * We've failed to reconstruct the BIOS FB. Current display state
  2408. * indicates that the primary plane is visible, but has a NULL FB,
  2409. * which will lead to problems later if we don't fix it up. The
  2410. * simplest solution is to just disable the primary plane now and
  2411. * pretend the BIOS never had it enabled.
  2412. */
  2413. to_intel_plane_state(plane_state)->base.visible = false;
  2414. crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
  2415. intel_pre_disable_primary_noatomic(&intel_crtc->base);
  2416. intel_plane->disable_plane(primary, &intel_crtc->base);
  2417. return;
  2418. valid_fb:
  2419. plane_state->src_x = 0;
  2420. plane_state->src_y = 0;
  2421. plane_state->src_w = fb->width << 16;
  2422. plane_state->src_h = fb->height << 16;
  2423. plane_state->crtc_x = 0;
  2424. plane_state->crtc_y = 0;
  2425. plane_state->crtc_w = fb->width;
  2426. plane_state->crtc_h = fb->height;
  2427. intel_state->base.src = drm_plane_state_src(plane_state);
  2428. intel_state->base.dst = drm_plane_state_dest(plane_state);
  2429. obj = intel_fb_obj(fb);
  2430. if (i915_gem_object_is_tiled(obj))
  2431. dev_priv->preserve_bios_swizzle = true;
  2432. drm_framebuffer_reference(fb);
  2433. primary->fb = primary->state->fb = fb;
  2434. primary->crtc = primary->state->crtc = &intel_crtc->base;
  2435. intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
  2436. atomic_or(to_intel_plane(primary)->frontbuffer_bit,
  2437. &obj->frontbuffer_bits);
  2438. }
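/*
 * Maximum SKL+ plane source width in pixels, which depends on the tiling
 * modifier and the bytes per pixel of the format.
 */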
  2439. static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
  2440. unsigned int rotation)
  2441. {
  2442. int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2443. switch (fb->modifier[plane]) {
  2444. case DRM_FORMAT_MOD_NONE:
  2445. case I915_FORMAT_MOD_X_TILED:
  2446. switch (cpp) {
  2447. case 8:
  2448. return 4096;
  2449. case 4:
  2450. case 2:
  2451. case 1:
  2452. return 8192;
  2453. default:
  2454. MISSING_CASE(cpp);
  2455. break;
  2456. }
  2457. break;
  2458. case I915_FORMAT_MOD_Y_TILED:
  2459. case I915_FORMAT_MOD_Yf_TILED:
  2460. switch (cpp) {
  2461. case 8:
  2462. return 2048;
  2463. case 4:
  2464. return 4096;
  2465. case 2:
  2466. case 1:
  2467. return 8192;
  2468. default:
  2469. MISSING_CASE(cpp);
  2470. break;
  2471. }
  2472. break;
  2473. default:
  2474. MISSING_CASE(fb->modifier[plane]);
  2475. }
  2476. return 2048;
  2477. }
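/*
 * Validate and finalize the main (Y/RGB) surface: compute the tile-aligned
 * surface offset plus the adjusted x/y within it, and for X-tiled fbs walk
 * the offset back until x + width fits within the stride.
 */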
  2478. static int skl_check_main_surface(struct intel_plane_state *plane_state)
  2479. {
  2480. const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
  2481. const struct drm_framebuffer *fb = plane_state->base.fb;
  2482. unsigned int rotation = plane_state->base.rotation;
  2483. int x = plane_state->base.src.x1 >> 16;
  2484. int y = plane_state->base.src.y1 >> 16;
  2485. int w = drm_rect_width(&plane_state->base.src) >> 16;
  2486. int h = drm_rect_height(&plane_state->base.src) >> 16;
  2487. int max_width = skl_max_plane_width(fb, 0, rotation);
  2488. int max_height = 4096;
  2489. u32 alignment, offset, aux_offset = plane_state->aux.offset;
  2490. if (w > max_width || h > max_height) {
  2491. DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
  2492. w, h, max_width, max_height);
  2493. return -EINVAL;
  2494. }
  2495. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2496. offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
  2497. alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
  2498. /*
  2499. * AUX surface offset is specified as the distance from the
  2500. * main surface offset, and it must be non-negative. Make
  2501. * sure that is what we will get.
  2502. */
  2503. if (offset > aux_offset)
  2504. offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
  2505. offset, aux_offset & ~(alignment - 1));
  2506. /*
  2507. * When using an X-tiled surface, the plane blows up
  2508. * if the x offset + width exceed the stride.
  2509. *
2510. * TODO: linear and Y-tiled seem fine, Yf untested.
  2511. */
  2512. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
  2513. int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  2514. while ((x + w) * cpp > fb->pitches[0]) {
  2515. if (offset == 0) {
  2516. DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
  2517. return -EINVAL;
  2518. }
  2519. offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
  2520. offset, offset - alignment);
  2521. }
  2522. }
  2523. plane_state->main.offset = offset;
  2524. plane_state->main.x = x;
  2525. plane_state->main.y = y;
  2526. return 0;
  2527. }
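/*
 * Validate the NV12 chroma (CbCr) surface. The >> 17 (instead of >> 16)
 * converts the 16.16 fixed-point source coordinates to chroma-plane units,
 * since the NV12 chroma plane is subsampled 2x in each direction.
 */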
  2528. static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
  2529. {
  2530. const struct drm_framebuffer *fb = plane_state->base.fb;
  2531. unsigned int rotation = plane_state->base.rotation;
  2532. int max_width = skl_max_plane_width(fb, 1, rotation);
  2533. int max_height = 4096;
  2534. int x = plane_state->base.src.x1 >> 17;
  2535. int y = plane_state->base.src.y1 >> 17;
  2536. int w = drm_rect_width(&plane_state->base.src) >> 17;
  2537. int h = drm_rect_height(&plane_state->base.src) >> 17;
  2538. u32 offset;
  2539. intel_add_fb_offsets(&x, &y, plane_state, 1);
  2540. offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
  2541. /* FIXME not quite sure how/if these apply to the chroma plane */
  2542. if (w > max_width || h > max_height) {
  2543. DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
  2544. w, h, max_width, max_height);
  2545. return -EINVAL;
  2546. }
  2547. plane_state->aux.offset = offset;
  2548. plane_state->aux.x = x;
  2549. plane_state->aux.y = y;
  2550. return 0;
  2551. }
  2552. int skl_check_plane_surface(struct intel_plane_state *plane_state)
  2553. {
  2554. const struct drm_framebuffer *fb = plane_state->base.fb;
  2555. unsigned int rotation = plane_state->base.rotation;
  2556. int ret;
  2557. /* Rotate src coordinates to match rotated GTT view */
  2558. if (drm_rotation_90_or_270(rotation))
  2559. drm_rect_rotate(&plane_state->base.src,
  2560. fb->width << 16, fb->height << 16,
  2561. DRM_ROTATE_270);
  2562. /*
  2563. * Handle the AUX surface first since
  2564. * the main surface setup depends on it.
  2565. */
  2566. if (fb->pixel_format == DRM_FORMAT_NV12) {
  2567. ret = skl_check_nv12_aux_surface(plane_state);
  2568. if (ret)
  2569. return ret;
  2570. } else {
  2571. plane_state->aux.offset = ~0xfff;
  2572. plane_state->aux.x = 0;
  2573. plane_state->aux.y = 0;
  2574. }
  2575. ret = skl_check_main_surface(plane_state);
  2576. if (ret)
  2577. return ret;
  2578. return 0;
  2579. }
  2580. static void i9xx_update_primary_plane(struct drm_plane *primary,
  2581. const struct intel_crtc_state *crtc_state,
  2582. const struct intel_plane_state *plane_state)
  2583. {
  2584. struct drm_i915_private *dev_priv = to_i915(primary->dev);
  2585. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2586. struct drm_framebuffer *fb = plane_state->base.fb;
  2587. int plane = intel_crtc->plane;
  2588. u32 linear_offset;
  2589. u32 dspcntr;
  2590. i915_reg_t reg = DSPCNTR(plane);
  2591. unsigned int rotation = plane_state->base.rotation;
  2592. int x = plane_state->base.src.x1 >> 16;
  2593. int y = plane_state->base.src.y1 >> 16;
  2594. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2595. dspcntr |= DISPLAY_PLANE_ENABLE;
  2596. if (INTEL_GEN(dev_priv) < 4) {
  2597. if (intel_crtc->pipe == PIPE_B)
  2598. dspcntr |= DISPPLANE_SEL_PIPE_B;
  2599. /* pipesrc and dspsize control the size that is scaled from,
  2600. * which should always be the user's requested size.
  2601. */
  2602. I915_WRITE(DSPSIZE(plane),
  2603. ((crtc_state->pipe_src_h - 1) << 16) |
  2604. (crtc_state->pipe_src_w - 1));
  2605. I915_WRITE(DSPPOS(plane), 0);
  2606. } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
  2607. I915_WRITE(PRIMSIZE(plane),
  2608. ((crtc_state->pipe_src_h - 1) << 16) |
  2609. (crtc_state->pipe_src_w - 1));
  2610. I915_WRITE(PRIMPOS(plane), 0);
  2611. I915_WRITE(PRIMCNSTALPHA(plane), 0);
  2612. }
  2613. switch (fb->pixel_format) {
  2614. case DRM_FORMAT_C8:
  2615. dspcntr |= DISPPLANE_8BPP;
  2616. break;
  2617. case DRM_FORMAT_XRGB1555:
  2618. dspcntr |= DISPPLANE_BGRX555;
  2619. break;
  2620. case DRM_FORMAT_RGB565:
  2621. dspcntr |= DISPPLANE_BGRX565;
  2622. break;
  2623. case DRM_FORMAT_XRGB8888:
  2624. dspcntr |= DISPPLANE_BGRX888;
  2625. break;
  2626. case DRM_FORMAT_XBGR8888:
  2627. dspcntr |= DISPPLANE_RGBX888;
  2628. break;
  2629. case DRM_FORMAT_XRGB2101010:
  2630. dspcntr |= DISPPLANE_BGRX101010;
  2631. break;
  2632. case DRM_FORMAT_XBGR2101010:
  2633. dspcntr |= DISPPLANE_RGBX101010;
  2634. break;
  2635. default:
  2636. BUG();
  2637. }
  2638. if (INTEL_GEN(dev_priv) >= 4 &&
  2639. fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  2640. dspcntr |= DISPPLANE_TILED;
  2641. if (rotation & DRM_ROTATE_180)
  2642. dspcntr |= DISPPLANE_ROTATE_180;
  2643. if (rotation & DRM_REFLECT_X)
  2644. dspcntr |= DISPPLANE_MIRROR;
  2645. if (IS_G4X(dev_priv))
  2646. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2647. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2648. if (INTEL_GEN(dev_priv) >= 4)
  2649. intel_crtc->dspaddr_offset =
  2650. intel_compute_tile_offset(&x, &y, plane_state, 0);
  2651. if (rotation & DRM_ROTATE_180) {
  2652. x += crtc_state->pipe_src_w - 1;
  2653. y += crtc_state->pipe_src_h - 1;
  2654. } else if (rotation & DRM_REFLECT_X) {
  2655. x += crtc_state->pipe_src_w - 1;
  2656. }
  2657. linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
  2658. if (INTEL_GEN(dev_priv) < 4)
  2659. intel_crtc->dspaddr_offset = linear_offset;
  2660. intel_crtc->adjusted_x = x;
  2661. intel_crtc->adjusted_y = y;
  2662. I915_WRITE(reg, dspcntr);
  2663. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2664. if (INTEL_GEN(dev_priv) >= 4) {
  2665. I915_WRITE(DSPSURF(plane),
  2666. intel_fb_gtt_offset(fb, rotation) +
  2667. intel_crtc->dspaddr_offset);
  2668. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2669. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2670. } else {
  2671. I915_WRITE(DSPADDR(plane),
  2672. intel_fb_gtt_offset(fb, rotation) +
  2673. intel_crtc->dspaddr_offset);
  2674. }
  2675. POSTING_READ(reg);
  2676. }
  2677. static void i9xx_disable_primary_plane(struct drm_plane *primary,
  2678. struct drm_crtc *crtc)
  2679. {
  2680. struct drm_device *dev = crtc->dev;
  2681. struct drm_i915_private *dev_priv = to_i915(dev);
  2682. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2683. int plane = intel_crtc->plane;
  2684. I915_WRITE(DSPCNTR(plane), 0);
  2685. if (INTEL_INFO(dev_priv)->gen >= 4)
  2686. I915_WRITE(DSPSURF(plane), 0);
  2687. else
  2688. I915_WRITE(DSPADDR(plane), 0);
  2689. POSTING_READ(DSPCNTR(plane));
  2690. }
  2691. static void ironlake_update_primary_plane(struct drm_plane *primary,
  2692. const struct intel_crtc_state *crtc_state,
  2693. const struct intel_plane_state *plane_state)
  2694. {
  2695. struct drm_device *dev = primary->dev;
  2696. struct drm_i915_private *dev_priv = to_i915(dev);
  2697. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2698. struct drm_framebuffer *fb = plane_state->base.fb;
  2699. int plane = intel_crtc->plane;
  2700. u32 linear_offset;
  2701. u32 dspcntr;
  2702. i915_reg_t reg = DSPCNTR(plane);
  2703. unsigned int rotation = plane_state->base.rotation;
  2704. int x = plane_state->base.src.x1 >> 16;
  2705. int y = plane_state->base.src.y1 >> 16;
  2706. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2707. dspcntr |= DISPLAY_PLANE_ENABLE;
  2708. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  2709. dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
  2710. switch (fb->pixel_format) {
  2711. case DRM_FORMAT_C8:
  2712. dspcntr |= DISPPLANE_8BPP;
  2713. break;
  2714. case DRM_FORMAT_RGB565:
  2715. dspcntr |= DISPPLANE_BGRX565;
  2716. break;
  2717. case DRM_FORMAT_XRGB8888:
  2718. dspcntr |= DISPPLANE_BGRX888;
  2719. break;
  2720. case DRM_FORMAT_XBGR8888:
  2721. dspcntr |= DISPPLANE_RGBX888;
  2722. break;
  2723. case DRM_FORMAT_XRGB2101010:
  2724. dspcntr |= DISPPLANE_BGRX101010;
  2725. break;
  2726. case DRM_FORMAT_XBGR2101010:
  2727. dspcntr |= DISPPLANE_RGBX101010;
  2728. break;
  2729. default:
  2730. BUG();
  2731. }
  2732. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  2733. dspcntr |= DISPPLANE_TILED;
  2734. if (rotation & DRM_ROTATE_180)
  2735. dspcntr |= DISPPLANE_ROTATE_180;
  2736. if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
  2737. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2738. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2739. intel_crtc->dspaddr_offset =
  2740. intel_compute_tile_offset(&x, &y, plane_state, 0);
  2741. /* HSW+ does this automagically in hardware */
  2742. if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
  2743. rotation & DRM_ROTATE_180) {
  2744. x += crtc_state->pipe_src_w - 1;
  2745. y += crtc_state->pipe_src_h - 1;
  2746. }
  2747. linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
  2748. intel_crtc->adjusted_x = x;
  2749. intel_crtc->adjusted_y = y;
  2750. I915_WRITE(reg, dspcntr);
  2751. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2752. I915_WRITE(DSPSURF(plane),
  2753. intel_fb_gtt_offset(fb, rotation) +
  2754. intel_crtc->dspaddr_offset);
  2755. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  2756. I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
  2757. } else {
  2758. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2759. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2760. }
  2761. POSTING_READ(reg);
  2762. }
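/* Stride alignment in bytes: 64 for linear buffers, otherwise one tile row
 * for the given modifier. */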
  2763. u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
  2764. uint64_t fb_modifier, uint32_t pixel_format)
  2765. {
  2766. if (fb_modifier == DRM_FORMAT_MOD_NONE) {
  2767. return 64;
  2768. } else {
  2769. int cpp = drm_format_plane_cpp(pixel_format, 0);
  2770. return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  2771. }
  2772. }
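/* Look up the GGTT offset of the framebuffer's vma for the GGTT view implied
 * by the plane rotation (normal vs. rotated). */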
  2773. u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
  2774. unsigned int rotation)
  2775. {
  2776. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2777. struct i915_ggtt_view view;
  2778. struct i915_vma *vma;
  2779. intel_fill_fb_ggtt_view(&view, fb, rotation);
  2780. vma = i915_gem_object_to_ggtt(obj, &view);
  2781. if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
  2782. view.type))
  2783. return -1;
  2784. return i915_ggtt_offset(vma);
  2785. }
  2786. static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
  2787. {
  2788. struct drm_device *dev = intel_crtc->base.dev;
  2789. struct drm_i915_private *dev_priv = to_i915(dev);
  2790. I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
  2791. I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
  2792. I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
  2793. }
  2794. /*
  2795. * This function detaches (aka. unbinds) unused scalers in hardware
  2796. */
  2797. static void skl_detach_scalers(struct intel_crtc *intel_crtc)
  2798. {
  2799. struct intel_crtc_scaler_state *scaler_state;
  2800. int i;
  2801. scaler_state = &intel_crtc->config->scaler_state;
  2802. /* loop through and disable scalers that aren't in use */
  2803. for (i = 0; i < intel_crtc->num_scalers; i++) {
  2804. if (!scaler_state->scalers[i].in_use)
  2805. skl_detach_scaler(intel_crtc, i);
  2806. }
  2807. }
  2808. u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
  2809. unsigned int rotation)
  2810. {
  2811. const struct drm_i915_private *dev_priv = to_i915(fb->dev);
  2812. u32 stride = intel_fb_pitch(fb, plane, rotation);
  2813. /*
2814. * The stride is expressed either as a multiple of 64-byte chunks for
2815. * linear buffers or as a number of tiles for tiled buffers.
  2816. */
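/* E.g. for an unrotated fb with a 4096-byte pitch this gives a stride of 64
 * for a linear buffer (64-byte chunks) but 8 for an X-tiled one (512-byte
 * tile rows). */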
  2817. if (drm_rotation_90_or_270(rotation)) {
  2818. int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2819. stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
  2820. } else {
  2821. stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  2822. fb->pixel_format);
  2823. }
  2824. return stride;
  2825. }
  2826. u32 skl_plane_ctl_format(uint32_t pixel_format)
  2827. {
  2828. switch (pixel_format) {
  2829. case DRM_FORMAT_C8:
  2830. return PLANE_CTL_FORMAT_INDEXED;
  2831. case DRM_FORMAT_RGB565:
  2832. return PLANE_CTL_FORMAT_RGB_565;
  2833. case DRM_FORMAT_XBGR8888:
  2834. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
  2835. case DRM_FORMAT_XRGB8888:
  2836. return PLANE_CTL_FORMAT_XRGB_8888;
  2837. /*
2838. * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
  2839. * to be already pre-multiplied. We need to add a knob (or a different
  2840. * DRM_FORMAT) for user-space to configure that.
  2841. */
  2842. case DRM_FORMAT_ABGR8888:
  2843. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
  2844. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2845. case DRM_FORMAT_ARGB8888:
  2846. return PLANE_CTL_FORMAT_XRGB_8888 |
  2847. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2848. case DRM_FORMAT_XRGB2101010:
  2849. return PLANE_CTL_FORMAT_XRGB_2101010;
  2850. case DRM_FORMAT_XBGR2101010:
  2851. return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
  2852. case DRM_FORMAT_YUYV:
  2853. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
  2854. case DRM_FORMAT_YVYU:
  2855. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
  2856. case DRM_FORMAT_UYVY:
  2857. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
  2858. case DRM_FORMAT_VYUY:
  2859. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
  2860. default:
  2861. MISSING_CASE(pixel_format);
  2862. }
  2863. return 0;
  2864. }
  2865. u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
  2866. {
  2867. switch (fb_modifier) {
  2868. case DRM_FORMAT_MOD_NONE:
  2869. break;
  2870. case I915_FORMAT_MOD_X_TILED:
  2871. return PLANE_CTL_TILED_X;
  2872. case I915_FORMAT_MOD_Y_TILED:
  2873. return PLANE_CTL_TILED_Y;
  2874. case I915_FORMAT_MOD_Yf_TILED:
  2875. return PLANE_CTL_TILED_YF;
  2876. default:
  2877. MISSING_CASE(fb_modifier);
  2878. }
  2879. return 0;
  2880. }
  2881. u32 skl_plane_ctl_rotation(unsigned int rotation)
  2882. {
  2883. switch (rotation) {
  2884. case DRM_ROTATE_0:
  2885. break;
  2886. /*
2887. * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
2888. * while i915 HW rotation is clockwise; that's why the values are swapped.
  2889. */
  2890. case DRM_ROTATE_90:
  2891. return PLANE_CTL_ROTATE_270;
  2892. case DRM_ROTATE_180:
  2893. return PLANE_CTL_ROTATE_180;
  2894. case DRM_ROTATE_270:
  2895. return PLANE_CTL_ROTATE_90;
  2896. default:
  2897. MISSING_CASE(rotation);
  2898. }
  2899. return 0;
  2900. }
  2901. static void skylake_update_primary_plane(struct drm_plane *plane,
  2902. const struct intel_crtc_state *crtc_state,
  2903. const struct intel_plane_state *plane_state)
  2904. {
  2905. struct drm_device *dev = plane->dev;
  2906. struct drm_i915_private *dev_priv = to_i915(dev);
  2907. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2908. struct drm_framebuffer *fb = plane_state->base.fb;
  2909. enum plane_id plane_id = to_intel_plane(plane)->id;
  2910. enum pipe pipe = to_intel_plane(plane)->pipe;
  2911. u32 plane_ctl;
  2912. unsigned int rotation = plane_state->base.rotation;
  2913. u32 stride = skl_plane_stride(fb, 0, rotation);
  2914. u32 surf_addr = plane_state->main.offset;
  2915. int scaler_id = plane_state->scaler_id;
  2916. int src_x = plane_state->main.x;
  2917. int src_y = plane_state->main.y;
  2918. int src_w = drm_rect_width(&plane_state->base.src) >> 16;
  2919. int src_h = drm_rect_height(&plane_state->base.src) >> 16;
  2920. int dst_x = plane_state->base.dst.x1;
  2921. int dst_y = plane_state->base.dst.y1;
  2922. int dst_w = drm_rect_width(&plane_state->base.dst);
  2923. int dst_h = drm_rect_height(&plane_state->base.dst);
  2924. plane_ctl = PLANE_CTL_ENABLE |
  2925. PLANE_CTL_PIPE_GAMMA_ENABLE |
  2926. PLANE_CTL_PIPE_CSC_ENABLE;
  2927. plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
  2928. plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
  2929. plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
  2930. plane_ctl |= skl_plane_ctl_rotation(rotation);
  2931. /* Sizes are 0 based */
  2932. src_w--;
  2933. src_h--;
  2934. dst_w--;
  2935. dst_h--;
  2936. intel_crtc->dspaddr_offset = surf_addr;
  2937. intel_crtc->adjusted_x = src_x;
  2938. intel_crtc->adjusted_y = src_y;
  2939. I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
  2940. I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
  2941. I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
  2942. I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
  2943. if (scaler_id >= 0) {
  2944. uint32_t ps_ctrl = 0;
  2945. WARN_ON(!dst_w || !dst_h);
  2946. ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
  2947. crtc_state->scaler_state.scalers[scaler_id].mode;
  2948. I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
  2949. I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
  2950. I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
  2951. I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
  2952. I915_WRITE(PLANE_POS(pipe, plane_id), 0);
  2953. } else {
  2954. I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
  2955. }
  2956. I915_WRITE(PLANE_SURF(pipe, plane_id),
  2957. intel_fb_gtt_offset(fb, rotation) + surf_addr);
  2958. POSTING_READ(PLANE_SURF(pipe, plane_id));
  2959. }
  2960. static void skylake_disable_primary_plane(struct drm_plane *primary,
  2961. struct drm_crtc *crtc)
  2962. {
  2963. struct drm_device *dev = crtc->dev;
  2964. struct drm_i915_private *dev_priv = to_i915(dev);
  2965. enum plane_id plane_id = to_intel_plane(primary)->id;
  2966. enum pipe pipe = to_intel_plane(primary)->pipe;
  2967. I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
  2968. I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
  2969. POSTING_READ(PLANE_SURF(pipe, plane_id));
  2970. }
  2971. /* Assume fb object is pinned & idle & fenced and just update base pointers */
  2972. static int
  2973. intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  2974. int x, int y, enum mode_set_atomic state)
  2975. {
  2976. /* Support for kgdboc is disabled, this needs a major rework. */
  2977. DRM_ERROR("legacy panic handler not supported any more.\n");
  2978. return -ENODEV;
  2979. }
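/* Complete any CS-based page flips still pending on each pipe so that
 * waiters and userspace events are not left hanging across a GPU reset. */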
  2980. static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
  2981. {
  2982. struct intel_crtc *crtc;
  2983. for_each_intel_crtc(&dev_priv->drm, crtc)
  2984. intel_finish_page_flip_cs(dev_priv, crtc->pipe);
  2985. }
  2986. static void intel_update_primary_planes(struct drm_device *dev)
  2987. {
  2988. struct drm_crtc *crtc;
  2989. for_each_crtc(dev, crtc) {
  2990. struct intel_plane *plane = to_intel_plane(crtc->primary);
  2991. struct intel_plane_state *plane_state =
  2992. to_intel_plane_state(plane->base.state);
  2993. if (plane_state->base.visible)
  2994. plane->update_plane(&plane->base,
  2995. to_intel_crtc_state(crtc->state),
  2996. plane_state);
  2997. }
  2998. }
  2999. static int
  3000. __intel_display_resume(struct drm_device *dev,
  3001. struct drm_atomic_state *state)
  3002. {
  3003. struct drm_crtc_state *crtc_state;
  3004. struct drm_crtc *crtc;
  3005. int i, ret;
  3006. intel_modeset_setup_hw_state(dev);
  3007. i915_redisable_vga(to_i915(dev));
  3008. if (!state)
  3009. return 0;
  3010. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  3011. /*
  3012. * Force recalculation even if we restore
  3013. * current state. With fast modeset this may not result
  3014. * in a modeset when the state is compatible.
  3015. */
  3016. crtc_state->mode_changed = true;
  3017. }
  3018. /* ignore any reset values/BIOS leftovers in the WM registers */
  3019. to_intel_atomic_state(state)->skip_intermediate_wm = true;
  3020. ret = drm_atomic_commit(state);
  3021. WARN_ON(ret == -EDEADLK);
  3022. return ret;
  3023. }
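/* On gen2-4 (except g4x) a GPU reset also clobbers the display engine, so
 * intel_finish_reset() has to reinitialize the display afterwards. */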
  3024. static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
  3025. {
  3026. return intel_has_gpu_reset(dev_priv) &&
  3027. INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
  3028. }
  3029. void intel_prepare_reset(struct drm_i915_private *dev_priv)
  3030. {
  3031. struct drm_device *dev = &dev_priv->drm;
  3032. struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
  3033. struct drm_atomic_state *state;
  3034. int ret;
  3035. /*
  3036. * Need mode_config.mutex so that we don't
  3037. * trample ongoing ->detect() and whatnot.
  3038. */
  3039. mutex_lock(&dev->mode_config.mutex);
  3040. drm_modeset_acquire_init(ctx, 0);
  3041. while (1) {
  3042. ret = drm_modeset_lock_all_ctx(dev, ctx);
  3043. if (ret != -EDEADLK)
  3044. break;
  3045. drm_modeset_backoff(ctx);
  3046. }
3047. /* reset doesn't touch the display, but flips might get nuked anyway */
  3048. if (!i915.force_reset_modeset_test &&
  3049. !gpu_reset_clobbers_display(dev_priv))
  3050. return;
  3051. /*
  3052. * Disabling the crtcs gracefully seems nicer. Also the
  3053. * g33 docs say we should at least disable all the planes.
  3054. */
  3055. state = drm_atomic_helper_duplicate_state(dev, ctx);
  3056. if (IS_ERR(state)) {
  3057. ret = PTR_ERR(state);
  3058. state = NULL;
  3059. DRM_ERROR("Duplicating state failed with %i\n", ret);
  3060. goto err;
  3061. }
  3062. ret = drm_atomic_helper_disable_all(dev, ctx);
  3063. if (ret) {
  3064. DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  3065. goto err;
  3066. }
  3067. dev_priv->modeset_restore_state = state;
  3068. state->acquire_ctx = ctx;
  3069. return;
  3070. err:
  3071. drm_atomic_state_put(state);
  3072. }
  3073. void intel_finish_reset(struct drm_i915_private *dev_priv)
  3074. {
  3075. struct drm_device *dev = &dev_priv->drm;
  3076. struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
  3077. struct drm_atomic_state *state = dev_priv->modeset_restore_state;
  3078. int ret;
  3079. /*
  3080. * Flips in the rings will be nuked by the reset,
  3081. * so complete all pending flips so that user space
  3082. * will get its events and not get stuck.
  3083. */
  3084. intel_complete_page_flips(dev_priv);
  3085. dev_priv->modeset_restore_state = NULL;
  3086. /* reset doesn't touch the display */
  3087. if (!gpu_reset_clobbers_display(dev_priv)) {
  3088. if (!state) {
  3089. /*
  3090. * Flips in the rings have been nuked by the reset,
  3091. * so update the base address of all primary
3092. * planes to the last fb to make sure we're
  3093. * showing the correct fb after a reset.
  3094. *
  3095. * FIXME: Atomic will make this obsolete since we won't schedule
  3096. * CS-based flips (which might get lost in gpu resets) any more.
  3097. */
  3098. intel_update_primary_planes(dev);
  3099. } else {
  3100. ret = __intel_display_resume(dev, state);
  3101. if (ret)
  3102. DRM_ERROR("Restoring old state failed with %i\n", ret);
  3103. }
  3104. } else {
  3105. /*
  3106. * The display has been reset as well,
  3107. * so need a full re-initialization.
  3108. */
  3109. intel_runtime_pm_disable_interrupts(dev_priv);
  3110. intel_runtime_pm_enable_interrupts(dev_priv);
  3111. intel_pps_unlock_regs_wa(dev_priv);
  3112. intel_modeset_init_hw(dev);
  3113. spin_lock_irq(&dev_priv->irq_lock);
  3114. if (dev_priv->display.hpd_irq_setup)
  3115. dev_priv->display.hpd_irq_setup(dev_priv);
  3116. spin_unlock_irq(&dev_priv->irq_lock);
  3117. ret = __intel_display_resume(dev, state);
  3118. if (ret)
  3119. DRM_ERROR("Restoring old state failed with %i\n", ret);
  3120. intel_hpd_init(dev_priv);
  3121. }
  3122. if (state)
  3123. drm_atomic_state_put(state);
  3124. drm_modeset_drop_locks(ctx);
  3125. drm_modeset_acquire_fini(ctx);
  3126. mutex_unlock(&dev->mode_config.mutex);
  3127. }
  3128. static bool abort_flip_on_reset(struct intel_crtc *crtc)
  3129. {
  3130. struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
  3131. if (i915_reset_in_progress(error))
  3132. return true;
  3133. if (crtc->reset_count != i915_reset_count(error))
  3134. return true;
  3135. return false;
  3136. }
  3137. static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
  3138. {
  3139. struct drm_device *dev = crtc->dev;
  3140. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3141. bool pending;
  3142. if (abort_flip_on_reset(intel_crtc))
  3143. return false;
  3144. spin_lock_irq(&dev->event_lock);
  3145. pending = to_intel_crtc(crtc)->flip_work != NULL;
  3146. spin_unlock_irq(&dev->event_lock);
  3147. return pending;
  3148. }
  3149. static void intel_update_pipe_config(struct intel_crtc *crtc,
  3150. struct intel_crtc_state *old_crtc_state)
  3151. {
  3152. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  3153. struct intel_crtc_state *pipe_config =
  3154. to_intel_crtc_state(crtc->base.state);
  3155. /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
  3156. crtc->base.mode = crtc->base.state->mode;
  3157. DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
  3158. old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
  3159. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  3160. /*
  3161. * Update pipe size and adjust fitter if needed: the reason for this is
  3162. * that in compute_mode_changes we check the native mode (not the pfit
  3163. * mode) to see if we can flip rather than do a full mode set. In the
  3164. * fastboot case, we'll flip, but if we don't update the pipesrc and
  3165. * pfit state, we'll end up with a big fb scanned out into the wrong
  3166. * sized surface.
  3167. */
  3168. I915_WRITE(PIPESRC(crtc->pipe),
  3169. ((pipe_config->pipe_src_w - 1) << 16) |
  3170. (pipe_config->pipe_src_h - 1));
  3171. /* on skylake this is done by detaching scalers */
  3172. if (INTEL_GEN(dev_priv) >= 9) {
  3173. skl_detach_scalers(crtc);
  3174. if (pipe_config->pch_pfit.enabled)
  3175. skylake_pfit_enable(crtc);
  3176. } else if (HAS_PCH_SPLIT(dev_priv)) {
  3177. if (pipe_config->pch_pfit.enabled)
  3178. ironlake_pfit_enable(crtc);
  3179. else if (old_crtc_state->pch_pfit.enabled)
  3180. ironlake_pfit_disable(crtc, true);
  3181. }
  3182. }
  3183. static void intel_fdi_normal_train(struct drm_crtc *crtc)
  3184. {
  3185. struct drm_device *dev = crtc->dev;
  3186. struct drm_i915_private *dev_priv = to_i915(dev);
  3187. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3188. int pipe = intel_crtc->pipe;
  3189. i915_reg_t reg;
  3190. u32 temp;
  3191. /* enable normal train */
  3192. reg = FDI_TX_CTL(pipe);
  3193. temp = I915_READ(reg);
  3194. if (IS_IVYBRIDGE(dev_priv)) {
  3195. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  3196. temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
  3197. } else {
  3198. temp &= ~FDI_LINK_TRAIN_NONE;
  3199. temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
  3200. }
  3201. I915_WRITE(reg, temp);
  3202. reg = FDI_RX_CTL(pipe);
  3203. temp = I915_READ(reg);
  3204. if (HAS_PCH_CPT(dev_priv)) {
  3205. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3206. temp |= FDI_LINK_TRAIN_NORMAL_CPT;
  3207. } else {
  3208. temp &= ~FDI_LINK_TRAIN_NONE;
  3209. temp |= FDI_LINK_TRAIN_NONE;
  3210. }
  3211. I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
  3212. /* wait one idle pattern time */
  3213. POSTING_READ(reg);
  3214. udelay(1000);
  3215. /* IVB wants error correction enabled */
  3216. if (IS_IVYBRIDGE(dev_priv))
  3217. I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
  3218. FDI_FE_ERRC_ENABLE);
  3219. }
  3220. /* The FDI link training functions for ILK/Ibexpeak. */
  3221. static void ironlake_fdi_link_train(struct drm_crtc *crtc)
  3222. {
  3223. struct drm_device *dev = crtc->dev;
  3224. struct drm_i915_private *dev_priv = to_i915(dev);
  3225. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3226. int pipe = intel_crtc->pipe;
  3227. i915_reg_t reg;
  3228. u32 temp, tries;
  3229. /* FDI needs bits from pipe first */
  3230. assert_pipe_enabled(dev_priv, pipe);
3231. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3232. for the train result */
  3233. reg = FDI_RX_IMR(pipe);
  3234. temp = I915_READ(reg);
  3235. temp &= ~FDI_RX_SYMBOL_LOCK;
  3236. temp &= ~FDI_RX_BIT_LOCK;
  3237. I915_WRITE(reg, temp);
  3238. I915_READ(reg);
  3239. udelay(150);
  3240. /* enable CPU FDI TX and PCH FDI RX */
  3241. reg = FDI_TX_CTL(pipe);
  3242. temp = I915_READ(reg);
  3243. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3244. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3245. temp &= ~FDI_LINK_TRAIN_NONE;
  3246. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3247. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3248. reg = FDI_RX_CTL(pipe);
  3249. temp = I915_READ(reg);
  3250. temp &= ~FDI_LINK_TRAIN_NONE;
  3251. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3252. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3253. POSTING_READ(reg);
  3254. udelay(150);
3255. /* Ironlake workaround, enable clock pointer after FDI enable */
  3256. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  3257. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
  3258. FDI_RX_PHASE_SYNC_POINTER_EN);
  3259. reg = FDI_RX_IIR(pipe);
  3260. for (tries = 0; tries < 5; tries++) {
  3261. temp = I915_READ(reg);
  3262. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3263. if ((temp & FDI_RX_BIT_LOCK)) {
  3264. DRM_DEBUG_KMS("FDI train 1 done.\n");
  3265. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3266. break;
  3267. }
  3268. }
  3269. if (tries == 5)
  3270. DRM_ERROR("FDI train 1 fail!\n");
  3271. /* Train 2 */
  3272. reg = FDI_TX_CTL(pipe);
  3273. temp = I915_READ(reg);
  3274. temp &= ~FDI_LINK_TRAIN_NONE;
  3275. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3276. I915_WRITE(reg, temp);
  3277. reg = FDI_RX_CTL(pipe);
  3278. temp = I915_READ(reg);
  3279. temp &= ~FDI_LINK_TRAIN_NONE;
  3280. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3281. I915_WRITE(reg, temp);
  3282. POSTING_READ(reg);
  3283. udelay(150);
  3284. reg = FDI_RX_IIR(pipe);
  3285. for (tries = 0; tries < 5; tries++) {
  3286. temp = I915_READ(reg);
  3287. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3288. if (temp & FDI_RX_SYMBOL_LOCK) {
  3289. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3290. DRM_DEBUG_KMS("FDI train 2 done.\n");
  3291. break;
  3292. }
  3293. }
  3294. if (tries == 5)
  3295. DRM_ERROR("FDI train 2 fail!\n");
  3296. DRM_DEBUG_KMS("FDI train done\n");
  3297. }
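/* FDI TX voltage-swing / pre-emphasis combinations tried in order by the
 * SNB and IVB link training loops below. */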
  3298. static const int snb_b_fdi_train_param[] = {
  3299. FDI_LINK_TRAIN_400MV_0DB_SNB_B,
  3300. FDI_LINK_TRAIN_400MV_6DB_SNB_B,
  3301. FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
  3302. FDI_LINK_TRAIN_800MV_0DB_SNB_B,
  3303. };
  3304. /* The FDI link training functions for SNB/Cougarpoint. */
  3305. static void gen6_fdi_link_train(struct drm_crtc *crtc)
  3306. {
  3307. struct drm_device *dev = crtc->dev;
  3308. struct drm_i915_private *dev_priv = to_i915(dev);
  3309. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3310. int pipe = intel_crtc->pipe;
  3311. i915_reg_t reg;
  3312. u32 temp, i, retry;
3313. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3314. for the train result */
  3315. reg = FDI_RX_IMR(pipe);
  3316. temp = I915_READ(reg);
  3317. temp &= ~FDI_RX_SYMBOL_LOCK;
  3318. temp &= ~FDI_RX_BIT_LOCK;
  3319. I915_WRITE(reg, temp);
  3320. POSTING_READ(reg);
  3321. udelay(150);
  3322. /* enable CPU FDI TX and PCH FDI RX */
  3323. reg = FDI_TX_CTL(pipe);
  3324. temp = I915_READ(reg);
  3325. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3326. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3327. temp &= ~FDI_LINK_TRAIN_NONE;
  3328. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3329. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3330. /* SNB-B */
  3331. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  3332. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3333. I915_WRITE(FDI_RX_MISC(pipe),
  3334. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  3335. reg = FDI_RX_CTL(pipe);
  3336. temp = I915_READ(reg);
  3337. if (HAS_PCH_CPT(dev_priv)) {
  3338. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3339. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3340. } else {
  3341. temp &= ~FDI_LINK_TRAIN_NONE;
  3342. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3343. }
  3344. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3345. POSTING_READ(reg);
  3346. udelay(150);
  3347. for (i = 0; i < 4; i++) {
  3348. reg = FDI_TX_CTL(pipe);
  3349. temp = I915_READ(reg);
  3350. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3351. temp |= snb_b_fdi_train_param[i];
  3352. I915_WRITE(reg, temp);
  3353. POSTING_READ(reg);
  3354. udelay(500);
  3355. for (retry = 0; retry < 5; retry++) {
  3356. reg = FDI_RX_IIR(pipe);
  3357. temp = I915_READ(reg);
  3358. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3359. if (temp & FDI_RX_BIT_LOCK) {
  3360. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3361. DRM_DEBUG_KMS("FDI train 1 done.\n");
  3362. break;
  3363. }
  3364. udelay(50);
  3365. }
  3366. if (retry < 5)
  3367. break;
  3368. }
  3369. if (i == 4)
  3370. DRM_ERROR("FDI train 1 fail!\n");
  3371. /* Train 2 */
  3372. reg = FDI_TX_CTL(pipe);
  3373. temp = I915_READ(reg);
  3374. temp &= ~FDI_LINK_TRAIN_NONE;
  3375. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3376. if (IS_GEN6(dev_priv)) {
  3377. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3378. /* SNB-B */
  3379. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  3380. }
  3381. I915_WRITE(reg, temp);
  3382. reg = FDI_RX_CTL(pipe);
  3383. temp = I915_READ(reg);
  3384. if (HAS_PCH_CPT(dev_priv)) {
  3385. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3386. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3387. } else {
  3388. temp &= ~FDI_LINK_TRAIN_NONE;
  3389. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3390. }
  3391. I915_WRITE(reg, temp);
  3392. POSTING_READ(reg);
  3393. udelay(150);
  3394. for (i = 0; i < 4; i++) {
  3395. reg = FDI_TX_CTL(pipe);
  3396. temp = I915_READ(reg);
  3397. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3398. temp |= snb_b_fdi_train_param[i];
  3399. I915_WRITE(reg, temp);
  3400. POSTING_READ(reg);
  3401. udelay(500);
  3402. for (retry = 0; retry < 5; retry++) {
  3403. reg = FDI_RX_IIR(pipe);
  3404. temp = I915_READ(reg);
  3405. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3406. if (temp & FDI_RX_SYMBOL_LOCK) {
  3407. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3408. DRM_DEBUG_KMS("FDI train 2 done.\n");
  3409. break;
  3410. }
  3411. udelay(50);
  3412. }
  3413. if (retry < 5)
  3414. break;
  3415. }
  3416. if (i == 4)
  3417. DRM_ERROR("FDI train 2 fail!\n");
  3418. DRM_DEBUG_KMS("FDI train done.\n");
  3419. }
  3420. /* Manual link training for Ivy Bridge A0 parts */
  3421. static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
  3422. {
  3423. struct drm_device *dev = crtc->dev;
  3424. struct drm_i915_private *dev_priv = to_i915(dev);
  3425. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3426. int pipe = intel_crtc->pipe;
  3427. i915_reg_t reg;
  3428. u32 temp, i, j;
3429. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3430. for the train result */
  3431. reg = FDI_RX_IMR(pipe);
  3432. temp = I915_READ(reg);
  3433. temp &= ~FDI_RX_SYMBOL_LOCK;
  3434. temp &= ~FDI_RX_BIT_LOCK;
  3435. I915_WRITE(reg, temp);
  3436. POSTING_READ(reg);
  3437. udelay(150);
  3438. DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
  3439. I915_READ(FDI_RX_IIR(pipe)));
  3440. /* Try each vswing and preemphasis setting twice before moving on */
  3441. for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
  3442. /* disable first in case we need to retry */
  3443. reg = FDI_TX_CTL(pipe);
  3444. temp = I915_READ(reg);
  3445. temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
  3446. temp &= ~FDI_TX_ENABLE;
  3447. I915_WRITE(reg, temp);
  3448. reg = FDI_RX_CTL(pipe);
  3449. temp = I915_READ(reg);
  3450. temp &= ~FDI_LINK_TRAIN_AUTO;
  3451. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3452. temp &= ~FDI_RX_ENABLE;
  3453. I915_WRITE(reg, temp);
  3454. /* enable CPU FDI TX and PCH FDI RX */
  3455. reg = FDI_TX_CTL(pipe);
  3456. temp = I915_READ(reg);
  3457. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3458. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3459. temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
  3460. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3461. temp |= snb_b_fdi_train_param[j/2];
  3462. temp |= FDI_COMPOSITE_SYNC;
  3463. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3464. I915_WRITE(FDI_RX_MISC(pipe),
  3465. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  3466. reg = FDI_RX_CTL(pipe);
  3467. temp = I915_READ(reg);
  3468. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3469. temp |= FDI_COMPOSITE_SYNC;
  3470. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3471. POSTING_READ(reg);
  3472. udelay(1); /* should be 0.5us */
  3473. for (i = 0; i < 4; i++) {
  3474. reg = FDI_RX_IIR(pipe);
  3475. temp = I915_READ(reg);
  3476. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3477. if (temp & FDI_RX_BIT_LOCK ||
  3478. (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
  3479. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3480. DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
  3481. i);
  3482. break;
  3483. }
  3484. udelay(1); /* should be 0.5us */
  3485. }
  3486. if (i == 4) {
  3487. DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
  3488. continue;
  3489. }
  3490. /* Train 2 */
  3491. reg = FDI_TX_CTL(pipe);
  3492. temp = I915_READ(reg);
  3493. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  3494. temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
  3495. I915_WRITE(reg, temp);
  3496. reg = FDI_RX_CTL(pipe);
  3497. temp = I915_READ(reg);
  3498. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3499. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3500. I915_WRITE(reg, temp);
  3501. POSTING_READ(reg);
  3502. udelay(2); /* should be 1.5us */
  3503. for (i = 0; i < 4; i++) {
  3504. reg = FDI_RX_IIR(pipe);
  3505. temp = I915_READ(reg);
  3506. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3507. if (temp & FDI_RX_SYMBOL_LOCK ||
  3508. (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
  3509. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3510. DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
  3511. i);
  3512. goto train_done;
  3513. }
  3514. udelay(2); /* should be 1.5us */
  3515. }
  3516. if (i == 4)
  3517. DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
  3518. }
  3519. train_done:
  3520. DRM_DEBUG_KMS("FDI train done.\n");
  3521. }
  3522. static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
  3523. {
  3524. struct drm_device *dev = intel_crtc->base.dev;
  3525. struct drm_i915_private *dev_priv = to_i915(dev);
  3526. int pipe = intel_crtc->pipe;
  3527. i915_reg_t reg;
  3528. u32 temp;
  3529. /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
  3530. reg = FDI_RX_CTL(pipe);
  3531. temp = I915_READ(reg);
  3532. temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
  3533. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3534. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3535. I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
  3536. POSTING_READ(reg);
  3537. udelay(200);
  3538. /* Switch from Rawclk to PCDclk */
  3539. temp = I915_READ(reg);
  3540. I915_WRITE(reg, temp | FDI_PCDCLK);
  3541. POSTING_READ(reg);
  3542. udelay(200);
  3543. /* Enable CPU FDI TX PLL, always on for Ironlake */
  3544. reg = FDI_TX_CTL(pipe);
  3545. temp = I915_READ(reg);
  3546. if ((temp & FDI_TX_PLL_ENABLE) == 0) {
  3547. I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
  3548. POSTING_READ(reg);
  3549. udelay(100);
  3550. }
  3551. }
  3552. static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
  3553. {
  3554. struct drm_device *dev = intel_crtc->base.dev;
  3555. struct drm_i915_private *dev_priv = to_i915(dev);
  3556. int pipe = intel_crtc->pipe;
  3557. i915_reg_t reg;
  3558. u32 temp;
  3559. /* Switch from PCDclk to Rawclk */
  3560. reg = FDI_RX_CTL(pipe);
  3561. temp = I915_READ(reg);
  3562. I915_WRITE(reg, temp & ~FDI_PCDCLK);
  3563. /* Disable CPU FDI TX PLL */
  3564. reg = FDI_TX_CTL(pipe);
  3565. temp = I915_READ(reg);
  3566. I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
  3567. POSTING_READ(reg);
  3568. udelay(100);
  3569. reg = FDI_RX_CTL(pipe);
  3570. temp = I915_READ(reg);
  3571. I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
  3572. /* Wait for the clocks to turn off. */
  3573. POSTING_READ(reg);
  3574. udelay(100);
  3575. }
  3576. static void ironlake_fdi_disable(struct drm_crtc *crtc)
  3577. {
  3578. struct drm_device *dev = crtc->dev;
  3579. struct drm_i915_private *dev_priv = to_i915(dev);
  3580. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3581. int pipe = intel_crtc->pipe;
  3582. i915_reg_t reg;
  3583. u32 temp;
  3584. /* disable CPU FDI tx and PCH FDI rx */
  3585. reg = FDI_TX_CTL(pipe);
  3586. temp = I915_READ(reg);
  3587. I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
  3588. POSTING_READ(reg);
  3589. reg = FDI_RX_CTL(pipe);
  3590. temp = I915_READ(reg);
  3591. temp &= ~(0x7 << 16);
  3592. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3593. I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
  3594. POSTING_READ(reg);
  3595. udelay(100);
  3596. /* Ironlake workaround, disable clock pointer after downing FDI */
  3597. if (HAS_PCH_IBX(dev_priv))
  3598. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  3599. /* still set train pattern 1 */
  3600. reg = FDI_TX_CTL(pipe);
  3601. temp = I915_READ(reg);
  3602. temp &= ~FDI_LINK_TRAIN_NONE;
  3603. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3604. I915_WRITE(reg, temp);
  3605. reg = FDI_RX_CTL(pipe);
  3606. temp = I915_READ(reg);
  3607. if (HAS_PCH_CPT(dev_priv)) {
  3608. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3609. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3610. } else {
  3611. temp &= ~FDI_LINK_TRAIN_NONE;
  3612. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3613. }
  3614. /* BPC in FDI rx is consistent with that in PIPECONF */
  3615. temp &= ~(0x07 << 16);
  3616. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3617. I915_WRITE(reg, temp);
  3618. POSTING_READ(reg);
  3619. udelay(100);
  3620. }
  3621. bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
  3622. {
  3623. struct intel_crtc *crtc;
  3624. /* Note that we don't need to be called with mode_config.lock here
  3625. * as our list of CRTC objects is static for the lifetime of the
  3626. * device and so cannot disappear as we iterate. Similarly, we can
  3627. * happily treat the predicates as racy, atomic checks as userspace
3628. * cannot claim and pin a new fb without at least acquiring the
  3629. * struct_mutex and so serialising with us.
  3630. */
  3631. for_each_intel_crtc(&dev_priv->drm, crtc) {
  3632. if (atomic_read(&crtc->unpin_work_count) == 0)
  3633. continue;
  3634. if (crtc->flip_work)
  3635. intel_wait_for_vblank(dev_priv, crtc->pipe);
  3636. return true;
  3637. }
  3638. return false;
  3639. }
  3640. static void page_flip_completed(struct intel_crtc *intel_crtc)
  3641. {
  3642. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  3643. struct intel_flip_work *work = intel_crtc->flip_work;
  3644. intel_crtc->flip_work = NULL;
  3645. if (work->event)
  3646. drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
  3647. drm_crtc_vblank_put(&intel_crtc->base);
  3648. wake_up_all(&dev_priv->pending_flip_queue);
  3649. queue_work(dev_priv->wq, &work->unpin_work);
  3650. trace_i915_flip_complete(intel_crtc->plane,
  3651. work->pending_flip_obj);
  3652. }
  3653. static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
  3654. {
  3655. struct drm_device *dev = crtc->dev;
  3656. struct drm_i915_private *dev_priv = to_i915(dev);
  3657. long ret;
  3658. WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
  3659. ret = wait_event_interruptible_timeout(
  3660. dev_priv->pending_flip_queue,
  3661. !intel_crtc_has_pending_flip(crtc),
  3662. 60*HZ);
  3663. if (ret < 0)
  3664. return ret;
  3665. if (ret == 0) {
  3666. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3667. struct intel_flip_work *work;
  3668. spin_lock_irq(&dev->event_lock);
  3669. work = intel_crtc->flip_work;
  3670. if (work && !is_mmio_work(work)) {
  3671. WARN_ONCE(1, "Removing stuck page flip\n");
  3672. page_flip_completed(intel_crtc);
  3673. }
  3674. spin_unlock_irq(&dev->event_lock);
  3675. }
  3676. return 0;
  3677. }
  3678. void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
  3679. {
  3680. u32 temp;
  3681. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
  3682. mutex_lock(&dev_priv->sb_lock);
  3683. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3684. temp |= SBI_SSCCTL_DISABLE;
  3685. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3686. mutex_unlock(&dev_priv->sb_lock);
  3687. }
  3688. /* Program iCLKIP clock to the desired frequency */
  3689. static void lpt_program_iclkip(struct drm_crtc *crtc)
  3690. {
  3691. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  3692. int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
  3693. u32 divsel, phaseinc, auxdiv, phasedir = 0;
  3694. u32 temp;
  3695. lpt_disable_iclkip(dev_priv);
  3696. /* The iCLK virtual clock root frequency is in MHz,
3697. * but the adjusted_mode->crtc_clock is in KHz. To get the
  3698. * divisors, it is necessary to divide one by another, so we
  3699. * convert the virtual clock precision to KHz here for higher
  3700. * precision.
  3701. */
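/* For instance, a 108000 kHz crtc_clock with auxdiv 0 gives
 * desired_divisor = 172800000 / 108000 = 1600, i.e. divsel = 23 and
 * phaseinc = 0. */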
  3702. for (auxdiv = 0; auxdiv < 2; auxdiv++) {
  3703. u32 iclk_virtual_root_freq = 172800 * 1000;
  3704. u32 iclk_pi_range = 64;
  3705. u32 desired_divisor;
  3706. desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3707. clock << auxdiv);
  3708. divsel = (desired_divisor / iclk_pi_range) - 2;
  3709. phaseinc = desired_divisor % iclk_pi_range;
  3710. /*
  3711. * Near 20MHz is a corner case which is
  3712. * out of range for the 7-bit divisor
  3713. */
  3714. if (divsel <= 0x7f)
  3715. break;
  3716. }
  3717. /* This should not happen with any sane values */
  3718. WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
  3719. ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
  3720. WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
  3721. ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
  3722. DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
  3723. clock,
  3724. auxdiv,
  3725. divsel,
  3726. phasedir,
  3727. phaseinc);
  3728. mutex_lock(&dev_priv->sb_lock);
  3729. /* Program SSCDIVINTPHASE6 */
  3730. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3731. temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
  3732. temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
  3733. temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
  3734. temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
  3735. temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
  3736. temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
  3737. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
  3738. /* Program SSCAUXDIV */
  3739. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3740. temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
  3741. temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
  3742. intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
  3743. /* Enable modulator and associated divider */
  3744. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3745. temp &= ~SBI_SSCCTL_DISABLE;
  3746. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3747. mutex_unlock(&dev_priv->sb_lock);
  3748. /* Wait for initialization time */
  3749. udelay(24);
  3750. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
  3751. }
  3752. int lpt_get_iclkip(struct drm_i915_private *dev_priv)
  3753. {
  3754. u32 divsel, phaseinc, auxdiv;
  3755. u32 iclk_virtual_root_freq = 172800 * 1000;
  3756. u32 iclk_pi_range = 64;
  3757. u32 desired_divisor;
  3758. u32 temp;
  3759. if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
  3760. return 0;
  3761. mutex_lock(&dev_priv->sb_lock);
  3762. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3763. if (temp & SBI_SSCCTL_DISABLE) {
  3764. mutex_unlock(&dev_priv->sb_lock);
  3765. return 0;
  3766. }
  3767. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3768. divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
  3769. SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
  3770. phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
  3771. SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
  3772. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3773. auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
  3774. SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
  3775. mutex_unlock(&dev_priv->sb_lock);
  3776. desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
  3777. return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3778. desired_divisor << auxdiv);
  3779. }
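/* Copy the CPU transcoder timings over to the PCH transcoder so both ends
 * of the FDI link run with identical timings. */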
  3780. static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
  3781. enum pipe pch_transcoder)
  3782. {
  3783. struct drm_device *dev = crtc->base.dev;
  3784. struct drm_i915_private *dev_priv = to_i915(dev);
  3785. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  3786. I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
  3787. I915_READ(HTOTAL(cpu_transcoder)));
  3788. I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
  3789. I915_READ(HBLANK(cpu_transcoder)));
  3790. I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
  3791. I915_READ(HSYNC(cpu_transcoder)));
  3792. I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
  3793. I915_READ(VTOTAL(cpu_transcoder)));
  3794. I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
  3795. I915_READ(VBLANK(cpu_transcoder)));
  3796. I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
  3797. I915_READ(VSYNC(cpu_transcoder)));
  3798. I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
  3799. I915_READ(VSYNCSHIFT(cpu_transcoder)));
  3800. }
  3801. static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
  3802. {
  3803. struct drm_i915_private *dev_priv = to_i915(dev);
  3804. uint32_t temp;
  3805. temp = I915_READ(SOUTH_CHICKEN1);
  3806. if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
  3807. return;
  3808. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
  3809. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
  3810. temp &= ~FDI_BC_BIFURCATION_SELECT;
  3811. if (enable)
  3812. temp |= FDI_BC_BIFURCATION_SELECT;
  3813. DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
  3814. I915_WRITE(SOUTH_CHICKEN1, temp);
  3815. POSTING_READ(SOUTH_CHICKEN1);
  3816. }
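/* FDI B and C share lanes on IVB/CPT: pipe B keeps all of them only when it
 * needs more than two, otherwise B/C bifurcation is enabled so pipe C can
 * use the remaining lanes. */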
  3817. static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
  3818. {
  3819. struct drm_device *dev = intel_crtc->base.dev;
  3820. switch (intel_crtc->pipe) {
  3821. case PIPE_A:
  3822. break;
  3823. case PIPE_B:
  3824. if (intel_crtc->config->fdi_lanes > 2)
  3825. cpt_set_fdi_bc_bifurcation(dev, false);
  3826. else
  3827. cpt_set_fdi_bc_bifurcation(dev, true);
  3828. break;
  3829. case PIPE_C:
  3830. cpt_set_fdi_bc_bifurcation(dev, true);
  3831. break;
  3832. default:
  3833. BUG();
  3834. }
  3835. }
  3836. /* Return which DP Port should be selected for Transcoder DP control */
  3837. static enum port
  3838. intel_trans_dp_port_sel(struct drm_crtc *crtc)
  3839. {
  3840. struct drm_device *dev = crtc->dev;
  3841. struct intel_encoder *encoder;
  3842. for_each_encoder_on_crtc(dev, crtc, encoder) {
  3843. if (encoder->type == INTEL_OUTPUT_DP ||
  3844. encoder->type == INTEL_OUTPUT_EDP)
  3845. return enc_to_dig_port(&encoder->base)->port;
  3846. }
  3847. return -1;
  3848. }
  3849. /*
  3850. * Enable PCH resources required for PCH ports:
  3851. * - PCH PLLs
  3852. * - FDI training & RX/TX
  3853. * - update transcoder timings
  3854. * - DP transcoding bits
  3855. * - transcoder
  3856. */
  3857. static void ironlake_pch_enable(struct drm_crtc *crtc)
  3858. {
  3859. struct drm_device *dev = crtc->dev;
  3860. struct drm_i915_private *dev_priv = to_i915(dev);
  3861. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3862. int pipe = intel_crtc->pipe;
  3863. u32 temp;
  3864. assert_pch_transcoder_disabled(dev_priv, pipe);
  3865. if (IS_IVYBRIDGE(dev_priv))
  3866. ivybridge_update_fdi_bc_bifurcation(intel_crtc);
  3867. /* Write the TU size bits before fdi link training, so that error
  3868. * detection works. */
  3869. I915_WRITE(FDI_RX_TUSIZE1(pipe),
  3870. I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
  3871. /* For PCH output, training FDI link */
  3872. dev_priv->display.fdi_link_train(crtc);
  3873. /* We need to program the right clock selection before writing the pixel
3874. * multiplier into the DPLL. */
  3875. if (HAS_PCH_CPT(dev_priv)) {
  3876. u32 sel;
  3877. temp = I915_READ(PCH_DPLL_SEL);
  3878. temp |= TRANS_DPLL_ENABLE(pipe);
  3879. sel = TRANS_DPLLB_SEL(pipe);
  3880. if (intel_crtc->config->shared_dpll ==
  3881. intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
  3882. temp |= sel;
  3883. else
  3884. temp &= ~sel;
  3885. I915_WRITE(PCH_DPLL_SEL, temp);
  3886. }
  3887. /* XXX: pch pll's can be enabled any time before we enable the PCH
  3888. * transcoder, and we actually should do this to not upset any PCH
3889. * transcoder that already uses the clock when we share it.
  3890. *
  3891. * Note that enable_shared_dpll tries to do the right thing, but
  3892. * get_shared_dpll unconditionally resets the pll - we need that to have
  3893. * the right LVDS enable sequence. */
  3894. intel_enable_shared_dpll(intel_crtc);
  3895. /* set transcoder timing, panel must allow it */
  3896. assert_panel_unlocked(dev_priv, pipe);
  3897. ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
  3898. intel_fdi_normal_train(crtc);
  3899. /* For PCH DP, enable TRANS_DP_CTL */
  3900. if (HAS_PCH_CPT(dev_priv) &&
  3901. intel_crtc_has_dp_encoder(intel_crtc->config)) {
  3902. const struct drm_display_mode *adjusted_mode =
  3903. &intel_crtc->config->base.adjusted_mode;
  3904. u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
  3905. i915_reg_t reg = TRANS_DP_CTL(pipe);
  3906. temp = I915_READ(reg);
  3907. temp &= ~(TRANS_DP_PORT_SEL_MASK |
  3908. TRANS_DP_SYNC_MASK |
  3909. TRANS_DP_BPC_MASK);
  3910. temp |= TRANS_DP_OUTPUT_ENABLE;
  3911. temp |= bpc << 9; /* same format but at 11:9 */
  3912. if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  3913. temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
  3914. if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  3915. temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
  3916. switch (intel_trans_dp_port_sel(crtc)) {
  3917. case PORT_B:
  3918. temp |= TRANS_DP_PORT_SEL_B;
  3919. break;
  3920. case PORT_C:
  3921. temp |= TRANS_DP_PORT_SEL_C;
  3922. break;
  3923. case PORT_D:
  3924. temp |= TRANS_DP_PORT_SEL_D;
  3925. break;
  3926. default:
  3927. BUG();
  3928. }
  3929. I915_WRITE(reg, temp);
  3930. }
  3931. ironlake_enable_pch_transcoder(dev_priv, pipe);
  3932. }
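/*
 * LPT exposes a single PCH transcoder (its registers live at the
 * transcoder A offsets) and clocks it from the iCLKIP PLL programmed in
 * lpt_program_iclkip() rather than from a shared PCH DPLL.
 */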
  3933. static void lpt_pch_enable(struct drm_crtc *crtc)
  3934. {
  3935. struct drm_device *dev = crtc->dev;
  3936. struct drm_i915_private *dev_priv = to_i915(dev);
  3937. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3938. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  3939. assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
  3940. lpt_program_iclkip(crtc);
  3941. /* Set transcoder timing. */
  3942. ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
  3943. lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
  3944. }
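/*
 * Sanity check that the pipe really started scanning out after the
 * modeset: sample the scanline counter (PIPEDSL) and complain if it
 * still hasn't moved after two short waits.
 */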
  3945. static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  3946. {
  3947. struct drm_i915_private *dev_priv = to_i915(dev);
  3948. i915_reg_t dslreg = PIPEDSL(pipe);
  3949. u32 temp;
  3950. temp = I915_READ(dslreg);
  3951. udelay(500);
  3952. if (wait_for(I915_READ(dslreg) != temp, 5)) {
  3953. if (wait_for(I915_READ(dslreg) != temp, 5))
  3954. DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
  3955. }
  3956. }
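/*
 * skl_update_scaler() only stages a scaler request (or release) in the
 * crtc_state: scaler_users is a bitmask with one bit per potential user
 * (the crtc itself for panel fitting plus each plane), and the actual
 * scaler registers are written later from the plane / panel fitter
 * programming paths.
 */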
  3957. static int
  3958. skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  3959. unsigned scaler_user, int *scaler_id, unsigned int rotation,
  3960. int src_w, int src_h, int dst_w, int dst_h)
  3961. {
  3962. struct intel_crtc_scaler_state *scaler_state =
  3963. &crtc_state->scaler_state;
  3964. struct intel_crtc *intel_crtc =
  3965. to_intel_crtc(crtc_state->base.crtc);
  3966. int need_scaling;
  3967. need_scaling = drm_rotation_90_or_270(rotation) ?
3968. (src_h != dst_w || src_w != dst_h) :
  3969. (src_w != dst_w || src_h != dst_h);
  3970. /*
3971. * if the plane is being disabled, the scaler is no longer required, or force detach
3972. * - free the scaler bound to this plane/crtc
  3973. * - in order to do this, update crtc->scaler_usage
  3974. *
  3975. * Here scaler state in crtc_state is set free so that
3976. * scaler can be assigned to another user. Actual register
  3977. * update to free the scaler is done in plane/panel-fit programming.
  3978. * For this purpose crtc/plane_state->scaler_id isn't reset here.
  3979. */
  3980. if (force_detach || !need_scaling) {
  3981. if (*scaler_id >= 0) {
  3982. scaler_state->scaler_users &= ~(1 << scaler_user);
  3983. scaler_state->scalers[*scaler_id].in_use = 0;
  3984. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  3985. "Staged freeing scaler id %d scaler_users = 0x%x\n",
  3986. intel_crtc->pipe, scaler_user, *scaler_id,
  3987. scaler_state->scaler_users);
  3988. *scaler_id = -1;
  3989. }
  3990. return 0;
  3991. }
  3992. /* range checks */
  3993. if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
  3994. dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
  3995. src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
  3996. dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
  3997. DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
  3998. "size is out of scaler range\n",
  3999. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
  4000. return -EINVAL;
  4001. }
  4002. /* mark this plane as a scaler user in crtc_state */
  4003. scaler_state->scaler_users |= (1 << scaler_user);
  4004. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  4005. "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
  4006. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
  4007. scaler_state->scaler_users);
  4008. return 0;
  4009. }
  4010. /**
  4011. * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
  4012. *
4013. * @state: crtc state
  4014. *
  4015. * Return
  4016. * 0 - scaler_usage updated successfully
  4017. * error - requested scaling cannot be supported or other error condition
  4018. */
  4019. int skl_update_scaler_crtc(struct intel_crtc_state *state)
  4020. {
  4021. const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
  4022. return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
  4023. &state->scaler_state.scaler_id, DRM_ROTATE_0,
  4024. state->pipe_src_w, state->pipe_src_h,
  4025. adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
  4026. }
  4027. /**
  4028. * skl_update_scaler_plane - Stages update to scaler state for a given plane.
  4029. *
4030. * @crtc_state: crtc state
  4031. * @plane_state: atomic plane state to update
  4032. *
  4033. * Return
  4034. * 0 - scaler_usage updated successfully
  4035. * error - requested scaling cannot be supported or other error condition
  4036. */
  4037. static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
  4038. struct intel_plane_state *plane_state)
  4039. {
  4040. struct intel_plane *intel_plane =
  4041. to_intel_plane(plane_state->base.plane);
  4042. struct drm_framebuffer *fb = plane_state->base.fb;
  4043. int ret;
  4044. bool force_detach = !fb || !plane_state->base.visible;
  4045. ret = skl_update_scaler(crtc_state, force_detach,
  4046. drm_plane_index(&intel_plane->base),
  4047. &plane_state->scaler_id,
  4048. plane_state->base.rotation,
  4049. drm_rect_width(&plane_state->base.src) >> 16,
  4050. drm_rect_height(&plane_state->base.src) >> 16,
  4051. drm_rect_width(&plane_state->base.dst),
  4052. drm_rect_height(&plane_state->base.dst));
  4053. if (ret || plane_state->scaler_id < 0)
  4054. return ret;
  4055. /* check colorkey */
  4056. if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
  4057. DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
  4058. intel_plane->base.base.id,
  4059. intel_plane->base.name);
  4060. return -EINVAL;
  4061. }
  4062. /* Check src format */
  4063. switch (fb->pixel_format) {
  4064. case DRM_FORMAT_RGB565:
  4065. case DRM_FORMAT_XBGR8888:
  4066. case DRM_FORMAT_XRGB8888:
  4067. case DRM_FORMAT_ABGR8888:
  4068. case DRM_FORMAT_ARGB8888:
  4069. case DRM_FORMAT_XRGB2101010:
  4070. case DRM_FORMAT_XBGR2101010:
  4071. case DRM_FORMAT_YUYV:
  4072. case DRM_FORMAT_YVYU:
  4073. case DRM_FORMAT_UYVY:
  4074. case DRM_FORMAT_VYUY:
  4075. break;
  4076. default:
  4077. DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
  4078. intel_plane->base.base.id, intel_plane->base.name,
  4079. fb->base.id, fb->pixel_format);
  4080. return -EINVAL;
  4081. }
  4082. return 0;
  4083. }
  4084. static void skylake_scaler_disable(struct intel_crtc *crtc)
  4085. {
  4086. int i;
  4087. for (i = 0; i < crtc->num_scalers; i++)
  4088. skl_detach_scaler(crtc, i);
  4089. }
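/*
 * On SKL+ the pch_pfit state is implemented with one of the pipe
 * scalers, so a scaler must already have been claimed for the crtc
 * (scaler_id >= 0) during the atomic check before this runs.
 */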
  4090. static void skylake_pfit_enable(struct intel_crtc *crtc)
  4091. {
  4092. struct drm_device *dev = crtc->base.dev;
  4093. struct drm_i915_private *dev_priv = to_i915(dev);
  4094. int pipe = crtc->pipe;
  4095. struct intel_crtc_scaler_state *scaler_state =
  4096. &crtc->config->scaler_state;
  4097. DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
  4098. if (crtc->config->pch_pfit.enabled) {
  4099. int id;
  4100. if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
  4101. DRM_ERROR("Requesting pfit without getting a scaler first\n");
  4102. return;
  4103. }
  4104. id = scaler_state->scaler_id;
  4105. I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
  4106. PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
  4107. I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
  4108. I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
  4109. DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
  4110. }
  4111. }
  4112. static void ironlake_pfit_enable(struct intel_crtc *crtc)
  4113. {
  4114. struct drm_device *dev = crtc->base.dev;
  4115. struct drm_i915_private *dev_priv = to_i915(dev);
  4116. int pipe = crtc->pipe;
  4117. if (crtc->config->pch_pfit.enabled) {
  4118. /* Force use of hard-coded filter coefficients
  4119. * as some pre-programmed values are broken,
  4120. * e.g. x201.
  4121. */
  4122. if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
  4123. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
  4124. PF_PIPE_SEL_IVB(pipe));
  4125. else
  4126. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
  4127. I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
  4128. I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
  4129. }
  4130. }
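/*
 * IPS is a Haswell/Broadwell display power saving feature; it only
 * exists on ULT parts and is tied to pipe A (see
 * hsw_crtc_supports_ips()). On Broadwell it is toggled through the
 * pcode mailbox, on Haswell directly via the IPS_CTL register.
 */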
  4131. void hsw_enable_ips(struct intel_crtc *crtc)
  4132. {
  4133. struct drm_device *dev = crtc->base.dev;
  4134. struct drm_i915_private *dev_priv = to_i915(dev);
  4135. if (!crtc->config->ips_enabled)
  4136. return;
  4137. /*
4138. * We can only enable IPS after we enable a plane and wait for a vblank.
  4139. * This function is called from post_plane_update, which is run after
  4140. * a vblank wait.
  4141. */
  4142. assert_plane_enabled(dev_priv, crtc->plane);
  4143. if (IS_BROADWELL(dev_priv)) {
  4144. mutex_lock(&dev_priv->rps.hw_lock);
  4145. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
  4146. mutex_unlock(&dev_priv->rps.hw_lock);
4147. /* Quoting Art Runyan: "it's not safe to expect any particular
  4148. * value in IPS_CTL bit 31 after enabling IPS through the
  4149. * mailbox." Moreover, the mailbox may return a bogus state,
  4150. * so we need to just enable it and continue on.
  4151. */
  4152. } else {
  4153. I915_WRITE(IPS_CTL, IPS_ENABLE);
  4154. /* The bit only becomes 1 in the next vblank, so this wait here
  4155. * is essentially intel_wait_for_vblank. If we don't have this
  4156. * and don't wait for vblanks until the end of crtc_enable, then
  4157. * the HW state readout code will complain that the expected
  4158. * IPS_CTL value is not the one we read. */
  4159. if (intel_wait_for_register(dev_priv,
  4160. IPS_CTL, IPS_ENABLE, IPS_ENABLE,
  4161. 50))
  4162. DRM_ERROR("Timed out waiting for IPS enable\n");
  4163. }
  4164. }
  4165. void hsw_disable_ips(struct intel_crtc *crtc)
  4166. {
  4167. struct drm_device *dev = crtc->base.dev;
  4168. struct drm_i915_private *dev_priv = to_i915(dev);
  4169. if (!crtc->config->ips_enabled)
  4170. return;
  4171. assert_plane_enabled(dev_priv, crtc->plane);
  4172. if (IS_BROADWELL(dev_priv)) {
  4173. mutex_lock(&dev_priv->rps.hw_lock);
  4174. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
  4175. mutex_unlock(&dev_priv->rps.hw_lock);
  4176. /* wait for pcode to finish disabling IPS, which may take up to 42ms */
  4177. if (intel_wait_for_register(dev_priv,
  4178. IPS_CTL, IPS_ENABLE, 0,
  4179. 42))
  4180. DRM_ERROR("Timed out waiting for IPS disable\n");
  4181. } else {
  4182. I915_WRITE(IPS_CTL, 0);
  4183. POSTING_READ(IPS_CTL);
  4184. }
  4185. /* We need to wait for a vblank before we can disable the plane. */
  4186. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4187. }
  4188. static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
  4189. {
  4190. if (intel_crtc->overlay) {
  4191. struct drm_device *dev = intel_crtc->base.dev;
  4192. struct drm_i915_private *dev_priv = to_i915(dev);
  4193. mutex_lock(&dev->struct_mutex);
  4194. dev_priv->mm.interruptible = false;
  4195. (void) intel_overlay_switch_off(intel_crtc->overlay);
  4196. dev_priv->mm.interruptible = true;
  4197. mutex_unlock(&dev->struct_mutex);
  4198. }
  4199. /* Let userspace switch the overlay on again. In most cases userspace
  4200. * has to recompute where to put it anyway.
  4201. */
  4202. }
  4203. /**
  4204. * intel_post_enable_primary - Perform operations after enabling primary plane
  4205. * @crtc: the CRTC whose primary plane was just enabled
  4206. *
  4207. * Performs potentially sleeping operations that must be done after the primary
  4208. * plane is enabled, such as updating FBC and IPS. Note that this may be
  4209. * called due to an explicit primary plane update, or due to an implicit
  4210. * re-enable that is caused when a sprite plane is updated to no longer
  4211. * completely hide the primary plane.
  4212. */
  4213. static void
  4214. intel_post_enable_primary(struct drm_crtc *crtc)
  4215. {
  4216. struct drm_device *dev = crtc->dev;
  4217. struct drm_i915_private *dev_priv = to_i915(dev);
  4218. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4219. int pipe = intel_crtc->pipe;
  4220. /*
  4221. * FIXME IPS should be fine as long as one plane is
  4222. * enabled, but in practice it seems to have problems
  4223. * when going from primary only to sprite only and vice
  4224. * versa.
  4225. */
  4226. hsw_enable_ips(intel_crtc);
  4227. /*
  4228. * Gen2 reports pipe underruns whenever all planes are disabled.
  4229. * So don't enable underrun reporting before at least some planes
  4230. * are enabled.
  4231. * FIXME: Need to fix the logic to work when we turn off all planes
  4232. * but leave the pipe running.
  4233. */
  4234. if (IS_GEN2(dev_priv))
  4235. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4236. /* Underruns don't always raise interrupts, so check manually. */
  4237. intel_check_cpu_fifo_underruns(dev_priv);
  4238. intel_check_pch_fifo_underruns(dev_priv);
  4239. }
  4240. /* FIXME move all this to pre_plane_update() with proper state tracking */
  4241. static void
  4242. intel_pre_disable_primary(struct drm_crtc *crtc)
  4243. {
  4244. struct drm_device *dev = crtc->dev;
  4245. struct drm_i915_private *dev_priv = to_i915(dev);
  4246. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4247. int pipe = intel_crtc->pipe;
  4248. /*
  4249. * Gen2 reports pipe underruns whenever all planes are disabled.
4250. * So disable underrun reporting before all the planes get disabled.
  4251. * FIXME: Need to fix the logic to work when we turn off all planes
  4252. * but leave the pipe running.
  4253. */
  4254. if (IS_GEN2(dev_priv))
  4255. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4256. /*
  4257. * FIXME IPS should be fine as long as one plane is
  4258. * enabled, but in practice it seems to have problems
  4259. * when going from primary only to sprite only and vice
  4260. * versa.
  4261. */
  4262. hsw_disable_ips(intel_crtc);
  4263. }
  4264. /* FIXME get rid of this and use pre_plane_update */
  4265. static void
  4266. intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  4267. {
  4268. struct drm_device *dev = crtc->dev;
  4269. struct drm_i915_private *dev_priv = to_i915(dev);
  4270. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4271. int pipe = intel_crtc->pipe;
  4272. intel_pre_disable_primary(crtc);
  4273. /*
  4274. * Vblank time updates from the shadow to live plane control register
  4275. * are blocked if the memory self-refresh mode is active at that
  4276. * moment. So to make sure the plane gets truly disabled, disable
  4277. * first the self-refresh mode. The self-refresh enable bit in turn
  4278. * will be checked/applied by the HW only at the next frame start
  4279. * event which is after the vblank start event, so we need to have a
  4280. * wait-for-vblank between disabling the plane and the pipe.
  4281. */
  4282. if (HAS_GMCH_DISPLAY(dev_priv)) {
  4283. intel_set_memory_cxsr(dev_priv, false);
  4284. dev_priv->wm.vlv.cxsr = false;
  4285. intel_wait_for_vblank(dev_priv, pipe);
  4286. }
  4287. }
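/*
 * intel_pre_plane_update() and intel_post_plane_update() bracket the
 * plane programming in the atomic commit: the pre hook turns off
 * whatever must not be active while the planes change (cxsr, IPS, FBC,
 * LP watermarks) and programs intermediate watermarks, the post hook
 * re-enables those features and lets the final watermarks take effect.
 */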
  4288. static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
  4289. {
  4290. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  4291. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  4292. struct intel_crtc_state *pipe_config =
  4293. to_intel_crtc_state(crtc->base.state);
  4294. struct drm_plane *primary = crtc->base.primary;
  4295. struct drm_plane_state *old_pri_state =
  4296. drm_atomic_get_existing_plane_state(old_state, primary);
  4297. intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
  4298. crtc->wm.cxsr_allowed = true;
  4299. if (pipe_config->update_wm_post && pipe_config->base.active)
  4300. intel_update_watermarks(crtc);
  4301. if (old_pri_state) {
  4302. struct intel_plane_state *primary_state =
  4303. to_intel_plane_state(primary->state);
  4304. struct intel_plane_state *old_primary_state =
  4305. to_intel_plane_state(old_pri_state);
  4306. intel_fbc_post_update(crtc);
  4307. if (primary_state->base.visible &&
  4308. (needs_modeset(&pipe_config->base) ||
  4309. !old_primary_state->base.visible))
  4310. intel_post_enable_primary(&crtc->base);
  4311. }
  4312. }
  4313. static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
  4314. {
  4315. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  4316. struct drm_device *dev = crtc->base.dev;
  4317. struct drm_i915_private *dev_priv = to_i915(dev);
  4318. struct intel_crtc_state *pipe_config =
  4319. to_intel_crtc_state(crtc->base.state);
  4320. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  4321. struct drm_plane *primary = crtc->base.primary;
  4322. struct drm_plane_state *old_pri_state =
  4323. drm_atomic_get_existing_plane_state(old_state, primary);
  4324. bool modeset = needs_modeset(&pipe_config->base);
  4325. struct intel_atomic_state *old_intel_state =
  4326. to_intel_atomic_state(old_state);
  4327. if (old_pri_state) {
  4328. struct intel_plane_state *primary_state =
  4329. to_intel_plane_state(primary->state);
  4330. struct intel_plane_state *old_primary_state =
  4331. to_intel_plane_state(old_pri_state);
  4332. intel_fbc_pre_update(crtc, pipe_config, primary_state);
  4333. if (old_primary_state->base.visible &&
  4334. (modeset || !primary_state->base.visible))
  4335. intel_pre_disable_primary(&crtc->base);
  4336. }
  4337. if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
  4338. crtc->wm.cxsr_allowed = false;
  4339. /*
  4340. * Vblank time updates from the shadow to live plane control register
  4341. * are blocked if the memory self-refresh mode is active at that
  4342. * moment. So to make sure the plane gets truly disabled, disable
  4343. * first the self-refresh mode. The self-refresh enable bit in turn
  4344. * will be checked/applied by the HW only at the next frame start
  4345. * event which is after the vblank start event, so we need to have a
  4346. * wait-for-vblank between disabling the plane and the pipe.
  4347. */
  4348. if (old_crtc_state->base.active) {
  4349. intel_set_memory_cxsr(dev_priv, false);
  4350. dev_priv->wm.vlv.cxsr = false;
  4351. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4352. }
  4353. }
  4354. /*
  4355. * IVB workaround: must disable low power watermarks for at least
  4356. * one frame before enabling scaling. LP watermarks can be re-enabled
  4357. * when scaling is disabled.
  4358. *
  4359. * WaCxSRDisabledForSpriteScaling:ivb
  4360. */
  4361. if (pipe_config->disable_lp_wm) {
  4362. ilk_disable_lp_wm(dev);
  4363. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4364. }
  4365. /*
  4366. * If we're doing a modeset, we're done. No need to do any pre-vblank
  4367. * watermark programming here.
  4368. */
  4369. if (needs_modeset(&pipe_config->base))
  4370. return;
  4371. /*
  4372. * For platforms that support atomic watermarks, program the
  4373. * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
  4374. * will be the intermediate values that are safe for both pre- and
  4375. * post- vblank; when vblank happens, the 'active' values will be set
  4376. * to the final 'target' values and we'll do this again to get the
  4377. * optimal watermarks. For gen9+ platforms, the values we program here
  4378. * will be the final target values which will get automatically latched
  4379. * at vblank time; no further programming will be necessary.
  4380. *
  4381. * If a platform hasn't been transitioned to atomic watermarks yet,
  4382. * we'll continue to update watermarks the old way, if flags tell
  4383. * us to.
  4384. */
  4385. if (dev_priv->display.initial_watermarks != NULL)
  4386. dev_priv->display.initial_watermarks(old_intel_state,
  4387. pipe_config);
  4388. else if (pipe_config->update_wm_pre)
  4389. intel_update_watermarks(crtc);
  4390. }
  4391. static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
  4392. {
  4393. struct drm_device *dev = crtc->dev;
  4394. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4395. struct drm_plane *p;
  4396. int pipe = intel_crtc->pipe;
  4397. intel_crtc_dpms_overlay_disable(intel_crtc);
  4398. drm_for_each_plane_mask(p, dev, plane_mask)
  4399. to_intel_plane(p)->disable_plane(p, crtc);
  4400. /*
  4401. * FIXME: Once we grow proper nuclear flip support out of this we need
  4402. * to compute the mask of flip planes precisely. For the time being
  4403. * consider this a flip to a NULL plane.
  4404. */
  4405. intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
  4406. }
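/*
 * The intel_encoders_*() helpers below walk the connectors in the
 * atomic state and call the matching encoder hook for every encoder
 * driving the given crtc. The crtc enable paths call them in
 * pre_pll_enable -> pre_enable -> enable order, the disable paths in
 * disable -> post_disable -> post_pll_disable order.
 */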
  4407. static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
  4408. struct intel_crtc_state *crtc_state,
  4409. struct drm_atomic_state *old_state)
  4410. {
  4411. struct drm_connector_state *old_conn_state;
  4412. struct drm_connector *conn;
  4413. int i;
  4414. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4415. struct drm_connector_state *conn_state = conn->state;
  4416. struct intel_encoder *encoder =
  4417. to_intel_encoder(conn_state->best_encoder);
  4418. if (conn_state->crtc != crtc)
  4419. continue;
  4420. if (encoder->pre_pll_enable)
  4421. encoder->pre_pll_enable(encoder, crtc_state, conn_state);
  4422. }
  4423. }
  4424. static void intel_encoders_pre_enable(struct drm_crtc *crtc,
  4425. struct intel_crtc_state *crtc_state,
  4426. struct drm_atomic_state *old_state)
  4427. {
  4428. struct drm_connector_state *old_conn_state;
  4429. struct drm_connector *conn;
  4430. int i;
  4431. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4432. struct drm_connector_state *conn_state = conn->state;
  4433. struct intel_encoder *encoder =
  4434. to_intel_encoder(conn_state->best_encoder);
  4435. if (conn_state->crtc != crtc)
  4436. continue;
  4437. if (encoder->pre_enable)
  4438. encoder->pre_enable(encoder, crtc_state, conn_state);
  4439. }
  4440. }
  4441. static void intel_encoders_enable(struct drm_crtc *crtc,
  4442. struct intel_crtc_state *crtc_state,
  4443. struct drm_atomic_state *old_state)
  4444. {
  4445. struct drm_connector_state *old_conn_state;
  4446. struct drm_connector *conn;
  4447. int i;
  4448. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4449. struct drm_connector_state *conn_state = conn->state;
  4450. struct intel_encoder *encoder =
  4451. to_intel_encoder(conn_state->best_encoder);
  4452. if (conn_state->crtc != crtc)
  4453. continue;
  4454. encoder->enable(encoder, crtc_state, conn_state);
  4455. intel_opregion_notify_encoder(encoder, true);
  4456. }
  4457. }
  4458. static void intel_encoders_disable(struct drm_crtc *crtc,
  4459. struct intel_crtc_state *old_crtc_state,
  4460. struct drm_atomic_state *old_state)
  4461. {
  4462. struct drm_connector_state *old_conn_state;
  4463. struct drm_connector *conn;
  4464. int i;
  4465. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4466. struct intel_encoder *encoder =
  4467. to_intel_encoder(old_conn_state->best_encoder);
  4468. if (old_conn_state->crtc != crtc)
  4469. continue;
  4470. intel_opregion_notify_encoder(encoder, false);
  4471. encoder->disable(encoder, old_crtc_state, old_conn_state);
  4472. }
  4473. }
  4474. static void intel_encoders_post_disable(struct drm_crtc *crtc,
  4475. struct intel_crtc_state *old_crtc_state,
  4476. struct drm_atomic_state *old_state)
  4477. {
  4478. struct drm_connector_state *old_conn_state;
  4479. struct drm_connector *conn;
  4480. int i;
  4481. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4482. struct intel_encoder *encoder =
  4483. to_intel_encoder(old_conn_state->best_encoder);
  4484. if (old_conn_state->crtc != crtc)
  4485. continue;
  4486. if (encoder->post_disable)
  4487. encoder->post_disable(encoder, old_crtc_state, old_conn_state);
  4488. }
  4489. }
  4490. static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
  4491. struct intel_crtc_state *old_crtc_state,
  4492. struct drm_atomic_state *old_state)
  4493. {
  4494. struct drm_connector_state *old_conn_state;
  4495. struct drm_connector *conn;
  4496. int i;
  4497. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4498. struct intel_encoder *encoder =
  4499. to_intel_encoder(old_conn_state->best_encoder);
  4500. if (old_conn_state->crtc != crtc)
  4501. continue;
  4502. if (encoder->post_pll_disable)
  4503. encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
  4504. }
  4505. }
  4506. static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  4507. struct drm_atomic_state *old_state)
  4508. {
  4509. struct drm_crtc *crtc = pipe_config->base.crtc;
  4510. struct drm_device *dev = crtc->dev;
  4511. struct drm_i915_private *dev_priv = to_i915(dev);
  4512. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4513. int pipe = intel_crtc->pipe;
  4514. struct intel_atomic_state *old_intel_state =
  4515. to_intel_atomic_state(old_state);
  4516. if (WARN_ON(intel_crtc->active))
  4517. return;
  4518. /*
  4519. * Sometimes spurious CPU pipe underruns happen during FDI
  4520. * training, at least with VGA+HDMI cloning. Suppress them.
  4521. *
4522. * On ILK we get occasional spurious CPU pipe underruns
  4523. * between eDP port A enable and vdd enable. Also PCH port
  4524. * enable seems to result in the occasional CPU pipe underrun.
  4525. *
  4526. * Spurious PCH underruns also occur during PCH enabling.
  4527. */
  4528. if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
  4529. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4530. if (intel_crtc->config->has_pch_encoder)
  4531. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4532. if (intel_crtc->config->has_pch_encoder)
  4533. intel_prepare_shared_dpll(intel_crtc);
  4534. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  4535. intel_dp_set_m_n(intel_crtc, M1_N1);
  4536. intel_set_pipe_timings(intel_crtc);
  4537. intel_set_pipe_src_size(intel_crtc);
  4538. if (intel_crtc->config->has_pch_encoder) {
  4539. intel_cpu_transcoder_set_m_n(intel_crtc,
  4540. &intel_crtc->config->fdi_m_n, NULL);
  4541. }
  4542. ironlake_set_pipeconf(crtc);
  4543. intel_crtc->active = true;
  4544. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  4545. if (intel_crtc->config->has_pch_encoder) {
  4546. /* Note: FDI PLL enabling _must_ be done before we enable the
  4547. * cpu pipes, hence this is separate from all the other fdi/pch
  4548. * enabling. */
  4549. ironlake_fdi_pll_enable(intel_crtc);
  4550. } else {
  4551. assert_fdi_tx_disabled(dev_priv, pipe);
  4552. assert_fdi_rx_disabled(dev_priv, pipe);
  4553. }
  4554. ironlake_pfit_enable(intel_crtc);
  4555. /*
  4556. * On ILK+ LUT must be loaded before the pipe is running but with
  4557. * clocks enabled
  4558. */
  4559. intel_color_load_luts(&pipe_config->base);
  4560. if (dev_priv->display.initial_watermarks != NULL)
  4561. dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
  4562. intel_enable_pipe(intel_crtc);
  4563. if (intel_crtc->config->has_pch_encoder)
  4564. ironlake_pch_enable(crtc);
  4565. assert_vblank_disabled(crtc);
  4566. drm_crtc_vblank_on(crtc);
  4567. intel_encoders_enable(crtc, pipe_config, old_state);
  4568. if (HAS_PCH_CPT(dev_priv))
  4569. cpt_verify_modeset(dev, intel_crtc->pipe);
  4570. /* Must wait for vblank to avoid spurious PCH FIFO underruns */
  4571. if (intel_crtc->config->has_pch_encoder)
  4572. intel_wait_for_vblank(dev_priv, pipe);
  4573. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4574. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4575. }
  4576. /* IPS only exists on ULT machines and is tied to pipe A. */
  4577. static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
  4578. {
  4579. return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
  4580. }
  4581. static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  4582. struct drm_atomic_state *old_state)
  4583. {
  4584. struct drm_crtc *crtc = pipe_config->base.crtc;
  4585. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4586. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4587. int pipe = intel_crtc->pipe, hsw_workaround_pipe;
  4588. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4589. struct intel_atomic_state *old_intel_state =
  4590. to_intel_atomic_state(old_state);
  4591. if (WARN_ON(intel_crtc->active))
  4592. return;
  4593. if (intel_crtc->config->has_pch_encoder)
  4594. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4595. false);
  4596. intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  4597. if (intel_crtc->config->shared_dpll)
  4598. intel_enable_shared_dpll(intel_crtc);
  4599. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  4600. intel_dp_set_m_n(intel_crtc, M1_N1);
  4601. if (!transcoder_is_dsi(cpu_transcoder))
  4602. intel_set_pipe_timings(intel_crtc);
  4603. intel_set_pipe_src_size(intel_crtc);
  4604. if (cpu_transcoder != TRANSCODER_EDP &&
  4605. !transcoder_is_dsi(cpu_transcoder)) {
  4606. I915_WRITE(PIPE_MULT(cpu_transcoder),
  4607. intel_crtc->config->pixel_multiplier - 1);
  4608. }
  4609. if (intel_crtc->config->has_pch_encoder) {
  4610. intel_cpu_transcoder_set_m_n(intel_crtc,
  4611. &intel_crtc->config->fdi_m_n, NULL);
  4612. }
  4613. if (!transcoder_is_dsi(cpu_transcoder))
  4614. haswell_set_pipeconf(crtc);
  4615. haswell_set_pipemisc(crtc);
  4616. intel_color_set_csc(&pipe_config->base);
  4617. intel_crtc->active = true;
  4618. if (intel_crtc->config->has_pch_encoder)
  4619. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4620. else
  4621. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4622. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  4623. if (intel_crtc->config->has_pch_encoder)
  4624. dev_priv->display.fdi_link_train(crtc);
  4625. if (!transcoder_is_dsi(cpu_transcoder))
  4626. intel_ddi_enable_pipe_clock(intel_crtc);
  4627. if (INTEL_GEN(dev_priv) >= 9)
  4628. skylake_pfit_enable(intel_crtc);
  4629. else
  4630. ironlake_pfit_enable(intel_crtc);
  4631. /*
  4632. * On ILK+ LUT must be loaded before the pipe is running but with
  4633. * clocks enabled
  4634. */
  4635. intel_color_load_luts(&pipe_config->base);
  4636. intel_ddi_set_pipe_settings(crtc);
  4637. if (!transcoder_is_dsi(cpu_transcoder))
  4638. intel_ddi_enable_transcoder_func(crtc);
  4639. if (dev_priv->display.initial_watermarks != NULL)
  4640. dev_priv->display.initial_watermarks(old_intel_state,
  4641. pipe_config);
  4642. else
  4643. intel_update_watermarks(intel_crtc);
  4644. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4645. if (!transcoder_is_dsi(cpu_transcoder))
  4646. intel_enable_pipe(intel_crtc);
  4647. if (intel_crtc->config->has_pch_encoder)
  4648. lpt_pch_enable(crtc);
  4649. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
  4650. intel_ddi_set_vc_payload_alloc(crtc, true);
  4651. assert_vblank_disabled(crtc);
  4652. drm_crtc_vblank_on(crtc);
  4653. intel_encoders_enable(crtc, pipe_config, old_state);
  4654. if (intel_crtc->config->has_pch_encoder) {
  4655. intel_wait_for_vblank(dev_priv, pipe);
  4656. intel_wait_for_vblank(dev_priv, pipe);
  4657. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4658. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4659. true);
  4660. }
  4661. /* If we change the relative order between pipe/planes enabling, we need
  4662. * to change the workaround. */
  4663. hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
  4664. if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
  4665. intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
  4666. intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
  4667. }
  4668. }
  4669. static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
  4670. {
  4671. struct drm_device *dev = crtc->base.dev;
  4672. struct drm_i915_private *dev_priv = to_i915(dev);
  4673. int pipe = crtc->pipe;
4674. /* To avoid upsetting the power well on Haswell, only disable the pfit if
  4675. * it's in use. The hw state code will make sure we get this right. */
  4676. if (force || crtc->config->pch_pfit.enabled) {
  4677. I915_WRITE(PF_CTL(pipe), 0);
  4678. I915_WRITE(PF_WIN_POS(pipe), 0);
  4679. I915_WRITE(PF_WIN_SZ(pipe), 0);
  4680. }
  4681. }
  4682. static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  4683. struct drm_atomic_state *old_state)
  4684. {
  4685. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  4686. struct drm_device *dev = crtc->dev;
  4687. struct drm_i915_private *dev_priv = to_i915(dev);
  4688. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4689. int pipe = intel_crtc->pipe;
  4690. /*
  4691. * Sometimes spurious CPU pipe underruns happen when the
  4692. * pipe is already disabled, but FDI RX/TX is still enabled.
  4693. * Happens at least with VGA+HDMI cloning. Suppress them.
  4694. */
  4695. if (intel_crtc->config->has_pch_encoder) {
  4696. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4697. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4698. }
  4699. intel_encoders_disable(crtc, old_crtc_state, old_state);
  4700. drm_crtc_vblank_off(crtc);
  4701. assert_vblank_disabled(crtc);
  4702. intel_disable_pipe(intel_crtc);
  4703. ironlake_pfit_disable(intel_crtc, false);
  4704. if (intel_crtc->config->has_pch_encoder)
  4705. ironlake_fdi_disable(crtc);
  4706. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  4707. if (intel_crtc->config->has_pch_encoder) {
  4708. ironlake_disable_pch_transcoder(dev_priv, pipe);
  4709. if (HAS_PCH_CPT(dev_priv)) {
  4710. i915_reg_t reg;
  4711. u32 temp;
  4712. /* disable TRANS_DP_CTL */
  4713. reg = TRANS_DP_CTL(pipe);
  4714. temp = I915_READ(reg);
  4715. temp &= ~(TRANS_DP_OUTPUT_ENABLE |
  4716. TRANS_DP_PORT_SEL_MASK);
  4717. temp |= TRANS_DP_PORT_SEL_NONE;
  4718. I915_WRITE(reg, temp);
  4719. /* disable DPLL_SEL */
  4720. temp = I915_READ(PCH_DPLL_SEL);
  4721. temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
  4722. I915_WRITE(PCH_DPLL_SEL, temp);
  4723. }
  4724. ironlake_fdi_pll_disable(intel_crtc);
  4725. }
  4726. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4727. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4728. }
  4729. static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
  4730. struct drm_atomic_state *old_state)
  4731. {
  4732. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  4733. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4734. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4735. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4736. if (intel_crtc->config->has_pch_encoder)
  4737. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4738. false);
  4739. intel_encoders_disable(crtc, old_crtc_state, old_state);
  4740. drm_crtc_vblank_off(crtc);
  4741. assert_vblank_disabled(crtc);
  4742. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4743. if (!transcoder_is_dsi(cpu_transcoder))
  4744. intel_disable_pipe(intel_crtc);
  4745. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
  4746. intel_ddi_set_vc_payload_alloc(crtc, false);
  4747. if (!transcoder_is_dsi(cpu_transcoder))
  4748. intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
  4749. if (INTEL_GEN(dev_priv) >= 9)
  4750. skylake_scaler_disable(intel_crtc);
  4751. else
  4752. ironlake_pfit_disable(intel_crtc, false);
  4753. if (!transcoder_is_dsi(cpu_transcoder))
  4754. intel_ddi_disable_pipe_clock(intel_crtc);
  4755. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  4756. if (old_crtc_state->has_pch_encoder)
  4757. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4758. true);
  4759. }
  4760. static void i9xx_pfit_enable(struct intel_crtc *crtc)
  4761. {
  4762. struct drm_device *dev = crtc->base.dev;
  4763. struct drm_i915_private *dev_priv = to_i915(dev);
  4764. struct intel_crtc_state *pipe_config = crtc->config;
  4765. if (!pipe_config->gmch_pfit.control)
  4766. return;
  4767. /*
  4768. * The panel fitter should only be adjusted whilst the pipe is disabled,
  4769. * according to register description and PRM.
  4770. */
  4771. WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
  4772. assert_pipe_disabled(dev_priv, crtc->pipe);
  4773. I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
  4774. I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
  4775. /* Border color in case we don't scale up to the full screen. Black by
  4776. * default, change to something else for debugging. */
  4777. I915_WRITE(BCLRPAT(crtc->pipe), 0);
  4778. }
  4779. static enum intel_display_power_domain port_to_power_domain(enum port port)
  4780. {
  4781. switch (port) {
  4782. case PORT_A:
  4783. return POWER_DOMAIN_PORT_DDI_A_LANES;
  4784. case PORT_B:
  4785. return POWER_DOMAIN_PORT_DDI_B_LANES;
  4786. case PORT_C:
  4787. return POWER_DOMAIN_PORT_DDI_C_LANES;
  4788. case PORT_D:
  4789. return POWER_DOMAIN_PORT_DDI_D_LANES;
  4790. case PORT_E:
  4791. return POWER_DOMAIN_PORT_DDI_E_LANES;
  4792. default:
  4793. MISSING_CASE(port);
  4794. return POWER_DOMAIN_PORT_OTHER;
  4795. }
  4796. }
  4797. static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
  4798. {
  4799. switch (port) {
  4800. case PORT_A:
  4801. return POWER_DOMAIN_AUX_A;
  4802. case PORT_B:
  4803. return POWER_DOMAIN_AUX_B;
  4804. case PORT_C:
  4805. return POWER_DOMAIN_AUX_C;
  4806. case PORT_D:
  4807. return POWER_DOMAIN_AUX_D;
  4808. case PORT_E:
  4809. /* FIXME: Check VBT for actual wiring of PORT E */
  4810. return POWER_DOMAIN_AUX_D;
  4811. default:
  4812. MISSING_CASE(port);
  4813. return POWER_DOMAIN_AUX_A;
  4814. }
  4815. }
  4816. enum intel_display_power_domain
  4817. intel_display_port_power_domain(struct intel_encoder *intel_encoder)
  4818. {
  4819. struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
  4820. struct intel_digital_port *intel_dig_port;
  4821. switch (intel_encoder->type) {
  4822. case INTEL_OUTPUT_UNKNOWN:
  4823. /* Only DDI platforms should ever use this output type */
  4824. WARN_ON_ONCE(!HAS_DDI(dev_priv));
  4825. case INTEL_OUTPUT_DP:
  4826. case INTEL_OUTPUT_HDMI:
  4827. case INTEL_OUTPUT_EDP:
  4828. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4829. return port_to_power_domain(intel_dig_port->port);
  4830. case INTEL_OUTPUT_DP_MST:
  4831. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4832. return port_to_power_domain(intel_dig_port->port);
  4833. case INTEL_OUTPUT_ANALOG:
  4834. return POWER_DOMAIN_PORT_CRT;
  4835. case INTEL_OUTPUT_DSI:
  4836. return POWER_DOMAIN_PORT_DSI;
  4837. default:
  4838. return POWER_DOMAIN_PORT_OTHER;
  4839. }
  4840. }
  4841. enum intel_display_power_domain
  4842. intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
  4843. {
  4844. struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
  4845. struct intel_digital_port *intel_dig_port;
  4846. switch (intel_encoder->type) {
  4847. case INTEL_OUTPUT_UNKNOWN:
  4848. case INTEL_OUTPUT_HDMI:
  4849. /*
  4850. * Only DDI platforms should ever use these output types.
  4851. * We can get here after the HDMI detect code has already set
  4852. * the type of the shared encoder. Since we can't be sure
  4853. * what's the status of the given connectors, play safe and
  4854. * run the DP detection too.
  4855. */
  4856. WARN_ON_ONCE(!HAS_DDI(dev_priv));
  4857. case INTEL_OUTPUT_DP:
  4858. case INTEL_OUTPUT_EDP:
  4859. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4860. return port_to_aux_power_domain(intel_dig_port->port);
  4861. case INTEL_OUTPUT_DP_MST:
  4862. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4863. return port_to_aux_power_domain(intel_dig_port->port);
  4864. default:
  4865. MISSING_CASE(intel_encoder->type);
  4866. return POWER_DOMAIN_AUX_A;
  4867. }
  4868. }
  4869. static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
  4870. struct intel_crtc_state *crtc_state)
  4871. {
  4872. struct drm_device *dev = crtc->dev;
  4873. struct drm_encoder *encoder;
  4874. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4875. enum pipe pipe = intel_crtc->pipe;
  4876. unsigned long mask;
  4877. enum transcoder transcoder = crtc_state->cpu_transcoder;
  4878. if (!crtc_state->base.active)
  4879. return 0;
  4880. mask = BIT(POWER_DOMAIN_PIPE(pipe));
  4881. mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
  4882. if (crtc_state->pch_pfit.enabled ||
  4883. crtc_state->pch_pfit.force_thru)
  4884. mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
  4885. drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
  4886. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  4887. mask |= BIT(intel_display_port_power_domain(intel_encoder));
  4888. }
  4889. if (crtc_state->shared_dpll)
  4890. mask |= BIT(POWER_DOMAIN_PLLS);
  4891. return mask;
  4892. }
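/*
 * Take references on any power domains the crtc newly needs and return
 * the set of domains it no longer needs; the caller is expected to drop
 * those via modeset_put_power_domains() once the new state has been
 * committed to the hardware.
 */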
  4893. static unsigned long
  4894. modeset_get_crtc_power_domains(struct drm_crtc *crtc,
  4895. struct intel_crtc_state *crtc_state)
  4896. {
  4897. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4898. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4899. enum intel_display_power_domain domain;
  4900. unsigned long domains, new_domains, old_domains;
  4901. old_domains = intel_crtc->enabled_power_domains;
  4902. intel_crtc->enabled_power_domains = new_domains =
  4903. get_crtc_power_domains(crtc, crtc_state);
  4904. domains = new_domains & ~old_domains;
  4905. for_each_power_domain(domain, domains)
  4906. intel_display_power_get(dev_priv, domain);
  4907. return old_domains & ~new_domains;
  4908. }
  4909. static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
  4910. unsigned long domains)
  4911. {
  4912. enum intel_display_power_domain domain;
  4913. for_each_power_domain(domain, domains)
  4914. intel_display_power_put(dev_priv, domain);
  4915. }
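/*
 * The maximum dot clock is derived from the maximum cdclk: gen9+, HSW
 * and BDW can drive a pixel rate equal to cdclk, CHV only about 95% of
 * it, and everything else 90%, with an extra factor of two on gen2/3.
 */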
  4916. static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  4917. {
  4918. int max_cdclk_freq = dev_priv->max_cdclk_freq;
  4919. if (INTEL_INFO(dev_priv)->gen >= 9 ||
  4920. IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  4921. return max_cdclk_freq;
  4922. else if (IS_CHERRYVIEW(dev_priv))
  4923. return max_cdclk_freq*95/100;
  4924. else if (INTEL_INFO(dev_priv)->gen < 4)
  4925. return 2*max_cdclk_freq*90/100;
  4926. else
  4927. return max_cdclk_freq*90/100;
  4928. }
  4929. static int skl_calc_cdclk(int max_pixclk, int vco);
  4930. static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
  4931. {
  4932. if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
  4933. u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
  4934. int max_cdclk, vco;
  4935. vco = dev_priv->skl_preferred_vco_freq;
  4936. WARN_ON(vco != 8100000 && vco != 8640000);
  4937. /*
  4938. * Use the lower (vco 8640) cdclk values as a
  4939. * first guess. skl_calc_cdclk() will correct it
  4940. * if the preferred vco is 8100 instead.
  4941. */
  4942. if (limit == SKL_DFSM_CDCLK_LIMIT_675)
  4943. max_cdclk = 617143;
  4944. else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
  4945. max_cdclk = 540000;
  4946. else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
  4947. max_cdclk = 432000;
  4948. else
  4949. max_cdclk = 308571;
  4950. dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
  4951. } else if (IS_BROXTON(dev_priv)) {
  4952. dev_priv->max_cdclk_freq = 624000;
  4953. } else if (IS_BROADWELL(dev_priv)) {
  4954. /*
  4955. * FIXME with extra cooling we can allow
4956. * 540 MHz for ULX and 675 MHz for ULT.
  4957. * How can we know if extra cooling is
  4958. * available? PCI ID, VTB, something else?
  4959. */
  4960. if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  4961. dev_priv->max_cdclk_freq = 450000;
  4962. else if (IS_BDW_ULX(dev_priv))
  4963. dev_priv->max_cdclk_freq = 450000;
  4964. else if (IS_BDW_ULT(dev_priv))
  4965. dev_priv->max_cdclk_freq = 540000;
  4966. else
  4967. dev_priv->max_cdclk_freq = 675000;
  4968. } else if (IS_CHERRYVIEW(dev_priv)) {
  4969. dev_priv->max_cdclk_freq = 320000;
  4970. } else if (IS_VALLEYVIEW(dev_priv)) {
  4971. dev_priv->max_cdclk_freq = 400000;
  4972. } else {
  4973. /* otherwise assume cdclk is fixed */
  4974. dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
  4975. }
  4976. dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
  4977. DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
  4978. dev_priv->max_cdclk_freq);
  4979. DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
  4980. dev_priv->max_dotclk_freq);
  4981. }
  4982. static void intel_update_cdclk(struct drm_i915_private *dev_priv)
  4983. {
  4984. dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
  4985. if (INTEL_GEN(dev_priv) >= 9)
  4986. DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
  4987. dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
  4988. dev_priv->cdclk_pll.ref);
  4989. else
  4990. DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
  4991. dev_priv->cdclk_freq);
  4992. /*
  4993. * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
  4994. * Programmng [sic] note: bit[9:2] should be programmed to the number
  4995. * of cdclk that generates 4MHz reference clock freq which is used to
  4996. * generate GMBus clock. This will vary with the cdclk freq.
  4997. */
  4998. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  4999. I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
  5000. }
  5001. /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
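/*
 * In other words the field holds (cdclk - 1 MHz) with one fractional
 * bit: e.g. 337500 kHz -> (337500 - 1000) / 500 = 673, i.e. 336.5 MHz.
 */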
  5002. static int skl_cdclk_decimal(int cdclk)
  5003. {
  5004. return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
  5005. }
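/*
 * On BXT the cdclk comes from the DE PLL: vco = ref * ratio and
 * cdclk = vco / 2 / {1, 1.5, 2, 4}. With the 19.2 MHz reference, a
 * ratio of 60 gives a 1152 MHz vco (cdclk 144/288/384/576 MHz) and a
 * ratio of 65 gives 1248 MHz for the 624 MHz cdclk.
 */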
  5006. static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  5007. {
  5008. int ratio;
  5009. if (cdclk == dev_priv->cdclk_pll.ref)
  5010. return 0;
  5011. switch (cdclk) {
  5012. default:
  5013. MISSING_CASE(cdclk);
  5014. case 144000:
  5015. case 288000:
  5016. case 384000:
  5017. case 576000:
  5018. ratio = 60;
  5019. break;
  5020. case 624000:
  5021. ratio = 65;
  5022. break;
  5023. }
  5024. return dev_priv->cdclk_pll.ref * ratio;
  5025. }
  5026. static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
  5027. {
  5028. I915_WRITE(BXT_DE_PLL_ENABLE, 0);
  5029. /* Timeout 200us */
  5030. if (intel_wait_for_register(dev_priv,
  5031. BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
  5032. 1))
  5033. DRM_ERROR("timeout waiting for DE PLL unlock\n");
  5034. dev_priv->cdclk_pll.vco = 0;
  5035. }
  5036. static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
  5037. {
  5038. int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
  5039. u32 val;
  5040. val = I915_READ(BXT_DE_PLL_CTL);
  5041. val &= ~BXT_DE_PLL_RATIO_MASK;
  5042. val |= BXT_DE_PLL_RATIO(ratio);
  5043. I915_WRITE(BXT_DE_PLL_CTL, val);
  5044. I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
  5045. /* Timeout 200us */
  5046. if (intel_wait_for_register(dev_priv,
  5047. BXT_DE_PLL_ENABLE,
  5048. BXT_DE_PLL_LOCK,
  5049. BXT_DE_PLL_LOCK,
  5050. 1))
  5051. DRM_ERROR("timeout waiting for DE PLL lock\n");
  5052. dev_priv->cdclk_pll.vco = vco;
  5053. }
  5054. static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
  5055. {
  5056. u32 val, divider;
  5057. int vco, ret;
  5058. vco = bxt_de_pll_vco(dev_priv, cdclk);
  5059. DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
  5060. /* cdclk = vco / 2 / div{1,1.5,2,4} */
  5061. switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
  5062. case 8:
  5063. divider = BXT_CDCLK_CD2X_DIV_SEL_4;
  5064. break;
  5065. case 4:
  5066. divider = BXT_CDCLK_CD2X_DIV_SEL_2;
  5067. break;
  5068. case 3:
  5069. divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
  5070. break;
  5071. case 2:
  5072. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  5073. break;
  5074. default:
  5075. WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
  5076. WARN_ON(vco != 0);
  5077. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  5078. break;
  5079. }
  5080. /* Inform power controller of upcoming frequency change */
  5081. mutex_lock(&dev_priv->rps.hw_lock);
  5082. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  5083. 0x80000000);
  5084. mutex_unlock(&dev_priv->rps.hw_lock);
  5085. if (ret) {
  5086. DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
  5087. ret, cdclk);
  5088. return;
  5089. }
  5090. if (dev_priv->cdclk_pll.vco != 0 &&
  5091. dev_priv->cdclk_pll.vco != vco)
  5092. bxt_de_pll_disable(dev_priv);
  5093. if (dev_priv->cdclk_pll.vco != vco)
  5094. bxt_de_pll_enable(dev_priv, vco);
  5095. val = divider | skl_cdclk_decimal(cdclk);
  5096. /*
  5097. * FIXME if only the cd2x divider needs changing, it could be done
  5098. * without shutting off the pipe (if only one pipe is active).
  5099. */
  5100. val |= BXT_CDCLK_CD2X_PIPE_NONE;
  5101. /*
  5102. * Disable SSA Precharge when CD clock frequency < 500 MHz,
  5103. * enable otherwise.
  5104. */
  5105. if (cdclk >= 500000)
  5106. val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  5107. I915_WRITE(CDCLK_CTL, val);
  5108. mutex_lock(&dev_priv->rps.hw_lock);
  5109. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  5110. DIV_ROUND_UP(cdclk, 25000));
  5111. mutex_unlock(&dev_priv->rps.hw_lock);
  5112. if (ret) {
  5113. DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
  5114. ret, cdclk);
  5115. return;
  5116. }
  5117. intel_update_cdclk(dev_priv);
  5118. }
  5119. static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
  5120. {
  5121. u32 cdctl, expected;
  5122. intel_update_cdclk(dev_priv);
  5123. if (dev_priv->cdclk_pll.vco == 0 ||
  5124. dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
  5125. goto sanitize;
  5126. /* DPLL okay; verify the cdclock
  5127. *
  5128. * Some BIOS versions leave an incorrect decimal frequency value and
5129. * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
  5130. * so sanitize this register.
  5131. */
  5132. cdctl = I915_READ(CDCLK_CTL);
  5133. /*
  5134. * Let's ignore the pipe field, since BIOS could have configured the
5135. * divider either synced to an active pipe, or asynchronously
  5136. * (PIPE_NONE).
  5137. */
  5138. cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
  5139. expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
  5140. skl_cdclk_decimal(dev_priv->cdclk_freq);
  5141. /*
  5142. * Disable SSA Precharge when CD clock frequency < 500 MHz,
  5143. * enable otherwise.
  5144. */
  5145. if (dev_priv->cdclk_freq >= 500000)
  5146. expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  5147. if (cdctl == expected)
  5148. /* All well; nothing to sanitize */
  5149. return;
  5150. sanitize:
  5151. DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
  5152. /* force cdclk programming */
  5153. dev_priv->cdclk_freq = 0;
  5154. /* force full PLL disable + enable */
  5155. dev_priv->cdclk_pll.vco = -1;
  5156. }
  5157. void bxt_init_cdclk(struct drm_i915_private *dev_priv)
  5158. {
  5159. bxt_sanitize_cdclk(dev_priv);
  5160. if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
  5161. return;
  5162. /*
  5163. * FIXME:
  5164. * - The initial CDCLK needs to be read from VBT.
  5165. * Need to make this change after VBT has changes for BXT.
  5166. */
  5167. bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
  5168. }
  5169. void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
  5170. {
  5171. bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
  5172. }
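/*
 * The two cdclk ladders below correspond to the two possible DPLL0
 * VCOs: 8640 MHz divides down to 308.571/432/540/617.143 MHz, 8100 MHz
 * to 337.5/450/540/675 MHz. Pick the smallest cdclk on the current
 * ladder that still covers the peak pixel rate.
 */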
  5173. static int skl_calc_cdclk(int max_pixclk, int vco)
  5174. {
  5175. if (vco == 8640000) {
  5176. if (max_pixclk > 540000)
  5177. return 617143;
  5178. else if (max_pixclk > 432000)
  5179. return 540000;
  5180. else if (max_pixclk > 308571)
  5181. return 432000;
  5182. else
  5183. return 308571;
  5184. } else {
  5185. if (max_pixclk > 540000)
  5186. return 675000;
  5187. else if (max_pixclk > 450000)
  5188. return 540000;
  5189. else if (max_pixclk > 337500)
  5190. return 450000;
  5191. else
  5192. return 337500;
  5193. }
  5194. }
  5195. static void
  5196. skl_dpll0_update(struct drm_i915_private *dev_priv)
  5197. {
  5198. u32 val;
  5199. dev_priv->cdclk_pll.ref = 24000;
  5200. dev_priv->cdclk_pll.vco = 0;
  5201. val = I915_READ(LCPLL1_CTL);
  5202. if ((val & LCPLL_PLL_ENABLE) == 0)
  5203. return;
  5204. if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
  5205. return;
  5206. val = I915_READ(DPLL_CTRL1);
  5207. if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
  5208. DPLL_CTRL1_SSC(SKL_DPLL0) |
  5209. DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
  5210. DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
  5211. return;
  5212. switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
  5213. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
  5214. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
  5215. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
  5216. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
  5217. dev_priv->cdclk_pll.vco = 8100000;
  5218. break;
  5219. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
  5220. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
  5221. dev_priv->cdclk_pll.vco = 8640000;
  5222. break;
  5223. default:
  5224. MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
  5225. break;
  5226. }
  5227. }
  5228. void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
  5229. {
  5230. bool changed = dev_priv->skl_preferred_vco_freq != vco;
  5231. dev_priv->skl_preferred_vco_freq = vco;
  5232. if (changed)
  5233. intel_update_max_cdclk(dev_priv);
  5234. }
  5235. static void
  5236. skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
  5237. {
  5238. int min_cdclk = skl_calc_cdclk(0, vco);
  5239. u32 val;
  5240. WARN_ON(vco != 8100000 && vco != 8640000);
  5241. /* select the minimum CDCLK before enabling DPLL 0 */
  5242. val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
  5243. I915_WRITE(CDCLK_CTL, val);
  5244. POSTING_READ(CDCLK_CTL);
  5245. /*
  5246. * We always enable DPLL0 with the lowest link rate possible, but still
  5247. * taking into account the VCO required to operate the eDP panel at the
  5248. * desired frequency. The usual DP link rates operate with a VCO of
  5249. * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
  5250. * The modeset code is responsible for the selection of the exact link
  5251. * rate later on, with the constraint of choosing a frequency that
  5252. * works with vco.
  5253. */
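/*
 * For reference, each of the link rates grouped above divides its VCO
 * evenly: LINK_RATE_810/1350/1620/2700 are 8100 / {10, 6, 5, 3} MHz,
 * while LINK_RATE_1080/2160 are 8640 / {8, 4} MHz, hence the VCO choice.
 */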
  5254. val = I915_READ(DPLL_CTRL1);
  5255. val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
  5256. DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
  5257. val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  5258. if (vco == 8640000)
  5259. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
  5260. SKL_DPLL0);
  5261. else
  5262. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
  5263. SKL_DPLL0);
  5264. I915_WRITE(DPLL_CTRL1, val);
  5265. POSTING_READ(DPLL_CTRL1);
  5266. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
  5267. if (intel_wait_for_register(dev_priv,
  5268. LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
  5269. 5))
  5270. DRM_ERROR("DPLL0 not locked\n");
  5271. dev_priv->cdclk_pll.vco = vco;
  5272. /* We'll want to keep using the current vco from now on. */
  5273. skl_set_preferred_cdclk_vco(dev_priv, vco);
  5274. }
  5275. static void
  5276. skl_dpll0_disable(struct drm_i915_private *dev_priv)
  5277. {
  5278. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
  5279. if (intel_wait_for_register(dev_priv,
  5280. LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
  5281. 1))
  5282. DRM_ERROR("Couldn't disable DPLL0\n");
  5283. dev_priv->cdclk_pll.vco = 0;
  5284. }
  5285. static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
  5286. {
  5287. int ret;
  5288. u32 val;
  5289. /* inform PCU we want to change CDCLK */
  5290. val = SKL_CDCLK_PREPARE_FOR_CHANGE;
  5291. mutex_lock(&dev_priv->rps.hw_lock);
  5292. ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
  5293. mutex_unlock(&dev_priv->rps.hw_lock);
  5294. return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
  5295. }
  5296. static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
  5297. {
  5298. return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
  5299. }
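/*
 * CDCLK changes on SKL are a handshake with the PCU: ask the pcode to
 * prepare for the change and poll for readiness (above), reprogram
 * DPLL0/CDCLK_CTL, then tell the pcode which frequency was selected via
 * pcu_ack (below).
 */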
  5300. static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
  5301. {
  5302. u32 freq_select, pcu_ack;
  5303. WARN_ON((cdclk == 24000) != (vco == 0));
  5304. DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
  5305. if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
  5306. DRM_ERROR("failed to inform PCU about cdclk change\n");
  5307. return;
  5308. }
  5309. /* set CDCLK_CTL */
  5310. switch (cdclk) {
  5311. case 450000:
  5312. case 432000:
  5313. freq_select = CDCLK_FREQ_450_432;
  5314. pcu_ack = 1;
  5315. break;
  5316. case 540000:
  5317. freq_select = CDCLK_FREQ_540;
  5318. pcu_ack = 2;
  5319. break;
  5320. case 308571:
  5321. case 337500:
  5322. default:
  5323. freq_select = CDCLK_FREQ_337_308;
  5324. pcu_ack = 0;
  5325. break;
  5326. case 617143:
  5327. case 675000:
  5328. freq_select = CDCLK_FREQ_675_617;
  5329. pcu_ack = 3;
  5330. break;
  5331. }
  5332. if (dev_priv->cdclk_pll.vco != 0 &&
  5333. dev_priv->cdclk_pll.vco != vco)
  5334. skl_dpll0_disable(dev_priv);
  5335. if (dev_priv->cdclk_pll.vco != vco)
  5336. skl_dpll0_enable(dev_priv, vco);
  5337. I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
  5338. POSTING_READ(CDCLK_CTL);
  5339. /* inform PCU of the change */
  5340. mutex_lock(&dev_priv->rps.hw_lock);
  5341. sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
  5342. mutex_unlock(&dev_priv->rps.hw_lock);
  5343. intel_update_cdclk(dev_priv);
  5344. }
  5345. static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
  5346. void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
  5347. {
  5348. skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
  5349. }
  5350. void skl_init_cdclk(struct drm_i915_private *dev_priv)
  5351. {
  5352. int cdclk, vco;
  5353. skl_sanitize_cdclk(dev_priv);
  5354. if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
  5355. /*
  5356. * Use the current vco as our initial
  5357. * guess as to what the preferred vco is.
  5358. */
  5359. if (dev_priv->skl_preferred_vco_freq == 0)
  5360. skl_set_preferred_cdclk_vco(dev_priv,
  5361. dev_priv->cdclk_pll.vco);
  5362. return;
  5363. }
  5364. vco = dev_priv->skl_preferred_vco_freq;
  5365. if (vco == 0)
  5366. vco = 8100000;
  5367. cdclk = skl_calc_cdclk(0, vco);
  5368. skl_set_cdclk(dev_priv, cdclk, vco);
  5369. }
  5370. static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  5371. {
  5372. uint32_t cdctl, expected;
  5373. /*
  5374. * Check if the pre-os initialized the display.
  5375. * The pre-os sets the SWF18 scratchpad register, which the OS
  5376. * driver can use to check whether the display was brought up.
  5377. */
  5378. if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
  5379. goto sanitize;
  5380. intel_update_cdclk(dev_priv);
  5381. /* Is PLL enabled and locked ? */
  5382. if (dev_priv->cdclk_pll.vco == 0 ||
  5383. dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
  5384. goto sanitize;
  5385. /* DPLL okay; verify the cdclock
  5386. *
  5387. * In some instances the BIOS programs the frequency selection
  5388. * correctly but gets the decimal part wrong when the pre-os does
  5389. * not enable the display. Verify that as well.
  5390. */
  5391. cdctl = I915_READ(CDCLK_CTL);
  5392. expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
  5393. skl_cdclk_decimal(dev_priv->cdclk_freq);
  5394. if (cdctl == expected)
  5395. /* All well; nothing to sanitize */
  5396. return;
  5397. sanitize:
  5398. DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
  5399. /* force cdclk programming */
  5400. dev_priv->cdclk_freq = 0;
  5401. /* force full PLL disable + enable */
  5402. dev_priv->cdclk_pll.vco = -1;
  5403. }
  5404. /* Adjust CDclk dividers to allow high res or save power if possible */
  5405. static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
  5406. {
  5407. struct drm_i915_private *dev_priv = to_i915(dev);
  5408. u32 val, cmd;
  5409. WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
  5410. != dev_priv->cdclk_freq);
  5411. if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
  5412. cmd = 2;
  5413. else if (cdclk == 266667)
  5414. cmd = 1;
  5415. else
  5416. cmd = 0;
  5417. mutex_lock(&dev_priv->rps.hw_lock);
  5418. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  5419. val &= ~DSPFREQGUAR_MASK;
  5420. val |= (cmd << DSPFREQGUAR_SHIFT);
  5421. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  5422. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  5423. DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
  5424. 50)) {
  5425. DRM_ERROR("timed out waiting for CDclk change\n");
  5426. }
  5427. mutex_unlock(&dev_priv->rps.hw_lock);
  5428. mutex_lock(&dev_priv->sb_lock);
  5429. if (cdclk == 400000) {
  5430. u32 divider;
  5431. divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  5432. /* adjust cdclk divider */
  5433. val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
  5434. val &= ~CCK_FREQUENCY_VALUES;
  5435. val |= divider;
  5436. vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
  5437. if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
  5438. CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
  5439. 50))
  5440. DRM_ERROR("timed out waiting for CDclk change\n");
  5441. }
  5442. /* adjust self-refresh exit latency value */
  5443. val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
  5444. val &= ~0x7f;
  5445. /*
  5446. * For high bandwidth configs, we set a higher latency in the bunit
  5447. * so that the core display fetch happens in time to avoid underruns.
  5448. */
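/*
 * The BISOC latency field appears to be in 250 ns units, hence the
 * divides below: 4500 / 250 = 18 (4.5 us) and 3000 / 250 = 12 (3 us).
 */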
  5449. if (cdclk == 400000)
  5450. val |= 4500 / 250; /* 4.5 usec */
  5451. else
  5452. val |= 3000 / 250; /* 3.0 usec */
  5453. vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
  5454. mutex_unlock(&dev_priv->sb_lock);
  5455. intel_update_cdclk(dev_priv);
  5456. }
  5457. static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
  5458. {
  5459. struct drm_i915_private *dev_priv = to_i915(dev);
  5460. u32 val, cmd;
  5461. WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
  5462. != dev_priv->cdclk_freq);
  5463. switch (cdclk) {
  5464. case 333333:
  5465. case 320000:
  5466. case 266667:
  5467. case 200000:
  5468. break;
  5469. default:
  5470. MISSING_CASE(cdclk);
  5471. return;
  5472. }
  5473. /*
  5474. * Specs are full of misinformation, but testing on actual
  5475. * hardware has shown that we just need to write the desired
  5476. * CCK divider into the Punit register.
  5477. */
  5478. cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  5479. mutex_lock(&dev_priv->rps.hw_lock);
  5480. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  5481. val &= ~DSPFREQGUAR_MASK_CHV;
  5482. val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
  5483. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  5484. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  5485. DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
  5486. 50)) {
  5487. DRM_ERROR("timed out waiting for CDclk change\n");
  5488. }
  5489. mutex_unlock(&dev_priv->rps.hw_lock);
  5490. intel_update_cdclk(dev_priv);
  5491. }
  5492. static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
  5493. int max_pixclk)
  5494. {
  5495. int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
  5496. int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
  5497. /*
  5498. * Really only a few cases to deal with, as only 4 CDclks are supported:
  5499. * 200MHz
  5500. * 267MHz
  5501. * 320/333MHz (depends on HPLL freq)
  5502. * 400MHz (VLV only)
  5503. * So we check to see whether we're above 90% (VLV) or 95% (CHV)
  5504. * of the lower bin and adjust if needed.
  5505. *
  5506. * We seem to get an unstable or solid color picture at 200MHz.
  5507. * Not sure what's wrong. For now use 200MHz only when all pipes
  5508. * are off.
  5509. */
  5510. if (!IS_CHERRYVIEW(dev_priv) &&
  5511. max_pixclk > freq_320*limit/100)
  5512. return 400000;
  5513. else if (max_pixclk > 266667*limit/100)
  5514. return freq_320;
  5515. else if (max_pixclk > 0)
  5516. return 266667;
  5517. else
  5518. return 200000;
  5519. }
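/*
 * Example with the 90% limit on VLV: the 266 MHz bin covers pixel clocks
 * up to 266667 * 90 / 100 = 240000 kHz, and a 320 MHz bin covers up to
 * 320000 * 90 / 100 = 288000 kHz; anything above that selects 400 MHz.
 */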
  5520. static int bxt_calc_cdclk(int max_pixclk)
  5521. {
  5522. if (max_pixclk > 576000)
  5523. return 624000;
  5524. else if (max_pixclk > 384000)
  5525. return 576000;
  5526. else if (max_pixclk > 288000)
  5527. return 384000;
  5528. else if (max_pixclk > 144000)
  5529. return 288000;
  5530. else
  5531. return 144000;
  5532. }
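/*
 * These frequencies correspond to a 19.2 MHz reference: a DE PLL VCO of
 * 19200 * 60 = 1152000 kHz (19200 * 65 = 1248000 kHz for the 624 MHz
 * case) divided by the CD2X dividers gives 1152000 / {2, 3, 4, 8} =
 * 576000, 384000, 288000, 144000 kHz.
 */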
  5533. /* Compute the max pixel clock for the new configuration. */
  5534. static int intel_mode_max_pixclk(struct drm_device *dev,
  5535. struct drm_atomic_state *state)
  5536. {
  5537. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  5538. struct drm_i915_private *dev_priv = to_i915(dev);
  5539. struct drm_crtc *crtc;
  5540. struct drm_crtc_state *crtc_state;
  5541. unsigned max_pixclk = 0, i;
  5542. enum pipe pipe;
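/*
 * Start from the per-pipe requirements of the previous state; pipes that
 * are part of this atomic state overwrite their slot below, the rest
 * keep their old pixel clock requirement.
 */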
  5543. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  5544. sizeof(intel_state->min_pixclk));
  5545. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  5546. int pixclk = 0;
  5547. if (crtc_state->enable)
  5548. pixclk = crtc_state->adjusted_mode.crtc_clock;
  5549. intel_state->min_pixclk[i] = pixclk;
  5550. }
  5551. for_each_pipe(dev_priv, pipe)
  5552. max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
  5553. return max_pixclk;
  5554. }
  5555. static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
  5556. {
  5557. struct drm_device *dev = state->dev;
  5558. struct drm_i915_private *dev_priv = to_i915(dev);
  5559. int max_pixclk = intel_mode_max_pixclk(dev, state);
  5560. struct intel_atomic_state *intel_state =
  5561. to_intel_atomic_state(state);
  5562. intel_state->cdclk = intel_state->dev_cdclk =
  5563. valleyview_calc_cdclk(dev_priv, max_pixclk);
  5564. if (!intel_state->active_crtcs)
  5565. intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
  5566. return 0;
  5567. }
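/*
 * Note: intel_state->cdclk is the frequency required by the new state,
 * while dev_cdclk is what will actually be programmed at commit time;
 * with no active CRTCs the device can drop to the minimum cdclk.
 */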
  5568. static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
  5569. {
  5570. int max_pixclk = ilk_max_pixel_rate(state);
  5571. struct intel_atomic_state *intel_state =
  5572. to_intel_atomic_state(state);
  5573. intel_state->cdclk = intel_state->dev_cdclk =
  5574. bxt_calc_cdclk(max_pixclk);
  5575. if (!intel_state->active_crtcs)
  5576. intel_state->dev_cdclk = bxt_calc_cdclk(0);
  5577. return 0;
  5578. }
  5579. static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
  5580. {
  5581. unsigned int credits, default_credits;
  5582. if (IS_CHERRYVIEW(dev_priv))
  5583. default_credits = PFI_CREDIT(12);
  5584. else
  5585. default_credits = PFI_CREDIT(8);
  5586. if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
  5587. /* CHV suggested value is 31 or 63 */
  5588. if (IS_CHERRYVIEW(dev_priv))
  5589. credits = PFI_CREDIT_63;
  5590. else
  5591. credits = PFI_CREDIT(15);
  5592. } else {
  5593. credits = default_credits;
  5594. }
  5595. /*
  5596. * WA - write default credits before re-programming
  5597. * FIXME: should we also set the resend bit here?
  5598. */
  5599. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5600. default_credits);
  5601. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5602. credits | PFI_CREDIT_RESEND);
  5603. /*
  5604. * FIXME is this guaranteed to clear
  5605. * immediately or should we poll for it?
  5606. */
  5607. WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
  5608. }
  5609. static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  5610. {
  5611. struct drm_device *dev = old_state->dev;
  5612. struct drm_i915_private *dev_priv = to_i915(dev);
  5613. struct intel_atomic_state *old_intel_state =
  5614. to_intel_atomic_state(old_state);
  5615. unsigned req_cdclk = old_intel_state->dev_cdclk;
  5616. /*
  5617. * FIXME: We can end up here with all power domains off, yet
  5618. * with a CDCLK frequency other than the minimum. To account
  5619. * for this take the PIPE-A power domain, which covers the HW
  5620. * blocks needed for the following programming. This can be
  5621. * removed once it's guaranteed that we get here either with
  5622. * the minimum CDCLK set, or the required power domains
  5623. * enabled.
  5624. */
  5625. intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
  5626. if (IS_CHERRYVIEW(dev_priv))
  5627. cherryview_set_cdclk(dev, req_cdclk);
  5628. else
  5629. valleyview_set_cdclk(dev, req_cdclk);
  5630. vlv_program_pfi_credits(dev_priv);
  5631. intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
  5632. }
  5633. static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  5634. struct drm_atomic_state *old_state)
  5635. {
  5636. struct drm_crtc *crtc = pipe_config->base.crtc;
  5637. struct drm_device *dev = crtc->dev;
  5638. struct drm_i915_private *dev_priv = to_i915(dev);
  5639. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5640. int pipe = intel_crtc->pipe;
  5641. if (WARN_ON(intel_crtc->active))
  5642. return;
  5643. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  5644. intel_dp_set_m_n(intel_crtc, M1_N1);
  5645. intel_set_pipe_timings(intel_crtc);
  5646. intel_set_pipe_src_size(intel_crtc);
  5647. if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
  5648. struct drm_i915_private *dev_priv = to_i915(dev);
  5649. I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
  5650. I915_WRITE(CHV_CANVAS(pipe), 0);
  5651. }
  5652. i9xx_set_pipeconf(intel_crtc);
  5653. intel_crtc->active = true;
  5654. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5655. intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  5656. if (IS_CHERRYVIEW(dev_priv)) {
  5657. chv_prepare_pll(intel_crtc, intel_crtc->config);
  5658. chv_enable_pll(intel_crtc, intel_crtc->config);
  5659. } else {
  5660. vlv_prepare_pll(intel_crtc, intel_crtc->config);
  5661. vlv_enable_pll(intel_crtc, intel_crtc->config);
  5662. }
  5663. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  5664. i9xx_pfit_enable(intel_crtc);
  5665. intel_color_load_luts(&pipe_config->base);
  5666. intel_update_watermarks(intel_crtc);
  5667. intel_enable_pipe(intel_crtc);
  5668. assert_vblank_disabled(crtc);
  5669. drm_crtc_vblank_on(crtc);
  5670. intel_encoders_enable(crtc, pipe_config, old_state);
  5671. }
  5672. static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
  5673. {
  5674. struct drm_device *dev = crtc->base.dev;
  5675. struct drm_i915_private *dev_priv = to_i915(dev);
  5676. I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
  5677. I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
  5678. }
  5679. static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  5680. struct drm_atomic_state *old_state)
  5681. {
  5682. struct drm_crtc *crtc = pipe_config->base.crtc;
  5683. struct drm_device *dev = crtc->dev;
  5684. struct drm_i915_private *dev_priv = to_i915(dev);
  5685. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5686. enum pipe pipe = intel_crtc->pipe;
  5687. if (WARN_ON(intel_crtc->active))
  5688. return;
  5689. i9xx_set_pll_dividers(intel_crtc);
  5690. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  5691. intel_dp_set_m_n(intel_crtc, M1_N1);
  5692. intel_set_pipe_timings(intel_crtc);
  5693. intel_set_pipe_src_size(intel_crtc);
  5694. i9xx_set_pipeconf(intel_crtc);
  5695. intel_crtc->active = true;
  5696. if (!IS_GEN2(dev_priv))
  5697. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5698. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  5699. i9xx_enable_pll(intel_crtc);
  5700. i9xx_pfit_enable(intel_crtc);
  5701. intel_color_load_luts(&pipe_config->base);
  5702. intel_update_watermarks(intel_crtc);
  5703. intel_enable_pipe(intel_crtc);
  5704. assert_vblank_disabled(crtc);
  5705. drm_crtc_vblank_on(crtc);
  5706. intel_encoders_enable(crtc, pipe_config, old_state);
  5707. }
  5708. static void i9xx_pfit_disable(struct intel_crtc *crtc)
  5709. {
  5710. struct drm_device *dev = crtc->base.dev;
  5711. struct drm_i915_private *dev_priv = to_i915(dev);
  5712. if (!crtc->config->gmch_pfit.control)
  5713. return;
  5714. assert_pipe_disabled(dev_priv, crtc->pipe);
  5715. DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
  5716. I915_READ(PFIT_CONTROL));
  5717. I915_WRITE(PFIT_CONTROL, 0);
  5718. }
  5719. static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
  5720. struct drm_atomic_state *old_state)
  5721. {
  5722. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  5723. struct drm_device *dev = crtc->dev;
  5724. struct drm_i915_private *dev_priv = to_i915(dev);
  5725. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5726. int pipe = intel_crtc->pipe;
  5727. /*
  5728. * On gen2 planes are double buffered but the pipe isn't, so we must
  5729. * wait for planes to fully turn off before disabling the pipe.
  5730. */
  5731. if (IS_GEN2(dev_priv))
  5732. intel_wait_for_vblank(dev_priv, pipe);
  5733. intel_encoders_disable(crtc, old_crtc_state, old_state);
  5734. drm_crtc_vblank_off(crtc);
  5735. assert_vblank_disabled(crtc);
  5736. intel_disable_pipe(intel_crtc);
  5737. i9xx_pfit_disable(intel_crtc);
  5738. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  5739. if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
  5740. if (IS_CHERRYVIEW(dev_priv))
  5741. chv_disable_pll(dev_priv, pipe);
  5742. else if (IS_VALLEYVIEW(dev_priv))
  5743. vlv_disable_pll(dev_priv, pipe);
  5744. else
  5745. i9xx_disable_pll(intel_crtc);
  5746. }
  5747. intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
  5748. if (!IS_GEN2(dev_priv))
  5749. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  5750. }
  5751. static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
  5752. {
  5753. struct intel_encoder *encoder;
  5754. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5755. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  5756. enum intel_display_power_domain domain;
  5757. unsigned long domains;
  5758. struct drm_atomic_state *state;
  5759. struct intel_crtc_state *crtc_state;
  5760. int ret;
  5761. if (!intel_crtc->active)
  5762. return;
  5763. if (to_intel_plane_state(crtc->primary->state)->base.visible) {
  5764. WARN_ON(intel_crtc->flip_work);
  5765. intel_pre_disable_primary_noatomic(crtc);
  5766. intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
  5767. to_intel_plane_state(crtc->primary->state)->base.visible = false;
  5768. }
  5769. state = drm_atomic_state_alloc(crtc->dev);
  5770. state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
  5771. /* Everything's already locked, -EDEADLK can't happen. */
  5772. crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
  5773. ret = drm_atomic_add_affected_connectors(state, crtc);
  5774. WARN_ON(IS_ERR(crtc_state) || ret);
  5775. dev_priv->display.crtc_disable(crtc_state, state);
  5776. drm_atomic_state_put(state);
  5777. DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
  5778. crtc->base.id, crtc->name);
  5779. WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
  5780. crtc->state->active = false;
  5781. intel_crtc->active = false;
  5782. crtc->enabled = false;
  5783. crtc->state->connector_mask = 0;
  5784. crtc->state->encoder_mask = 0;
  5785. for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
  5786. encoder->base.crtc = NULL;
  5787. intel_fbc_disable(intel_crtc);
  5788. intel_update_watermarks(intel_crtc);
  5789. intel_disable_shared_dpll(intel_crtc);
  5790. domains = intel_crtc->enabled_power_domains;
  5791. for_each_power_domain(domain, domains)
  5792. intel_display_power_put(dev_priv, domain);
  5793. intel_crtc->enabled_power_domains = 0;
  5794. dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
  5795. dev_priv->min_pixclk[intel_crtc->pipe] = 0;
  5796. }
  5797. /*
  5798. * Turn all CRTCs off, but do not adjust their state.
  5799. * This has to be paired with a call to intel_modeset_setup_hw_state.
  5800. */
  5801. int intel_display_suspend(struct drm_device *dev)
  5802. {
  5803. struct drm_i915_private *dev_priv = to_i915(dev);
  5804. struct drm_atomic_state *state;
  5805. int ret;
  5806. state = drm_atomic_helper_suspend(dev);
  5807. ret = PTR_ERR_OR_ZERO(state);
  5808. if (ret)
  5809. DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  5810. else
  5811. dev_priv->modeset_restore_state = state;
  5812. return ret;
  5813. }
  5814. void intel_encoder_destroy(struct drm_encoder *encoder)
  5815. {
  5816. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  5817. drm_encoder_cleanup(encoder);
  5818. kfree(intel_encoder);
  5819. }
  5820. /* Cross check the actual hw state with our own modeset state tracking (and its
  5821. * internal consistency). */
  5822. static void intel_connector_verify_state(struct intel_connector *connector)
  5823. {
  5824. struct drm_crtc *crtc = connector->base.state->crtc;
  5825. DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  5826. connector->base.base.id,
  5827. connector->base.name);
  5828. if (connector->get_hw_state(connector)) {
  5829. struct intel_encoder *encoder = connector->encoder;
  5830. struct drm_connector_state *conn_state = connector->base.state;
  5831. I915_STATE_WARN(!crtc,
  5832. "connector enabled without attached crtc\n");
  5833. if (!crtc)
  5834. return;
  5835. I915_STATE_WARN(!crtc->state->active,
  5836. "connector is active, but attached crtc isn't\n");
  5837. if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
  5838. return;
  5839. I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
  5840. "atomic encoder doesn't match attached encoder\n");
  5841. I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
  5842. "attached encoder crtc differs from connector crtc\n");
  5843. } else {
  5844. I915_STATE_WARN(crtc && crtc->state->active,
  5845. "attached crtc is active, but connector isn't\n");
  5846. I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
  5847. "best encoder set without crtc!\n");
  5848. }
  5849. }
  5850. int intel_connector_init(struct intel_connector *connector)
  5851. {
  5852. drm_atomic_helper_connector_reset(&connector->base);
  5853. if (!connector->base.state)
  5854. return -ENOMEM;
  5855. return 0;
  5856. }
  5857. struct intel_connector *intel_connector_alloc(void)
  5858. {
  5859. struct intel_connector *connector;
  5860. connector = kzalloc(sizeof *connector, GFP_KERNEL);
  5861. if (!connector)
  5862. return NULL;
  5863. if (intel_connector_init(connector) < 0) {
  5864. kfree(connector);
  5865. return NULL;
  5866. }
  5867. return connector;
  5868. }
  5869. /* Simple connector->get_hw_state implementation for encoders that support only
  5870. * one connector and no cloning, so the encoder state alone determines the state
  5871. * of the connector. */
  5872. bool intel_connector_get_hw_state(struct intel_connector *connector)
  5873. {
  5874. enum pipe pipe = 0;
  5875. struct intel_encoder *encoder = connector->encoder;
  5876. return encoder->get_hw_state(encoder, &pipe);
  5877. }
  5878. static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  5879. {
  5880. if (crtc_state->base.enable && crtc_state->has_pch_encoder)
  5881. return crtc_state->fdi_lanes;
  5882. return 0;
  5883. }
  5884. static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  5885. struct intel_crtc_state *pipe_config)
  5886. {
  5887. struct drm_i915_private *dev_priv = to_i915(dev);
  5888. struct drm_atomic_state *state = pipe_config->base.state;
  5889. struct intel_crtc *other_crtc;
  5890. struct intel_crtc_state *other_crtc_state;
  5891. DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
  5892. pipe_name(pipe), pipe_config->fdi_lanes);
  5893. if (pipe_config->fdi_lanes > 4) {
  5894. DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
  5895. pipe_name(pipe), pipe_config->fdi_lanes);
  5896. return -EINVAL;
  5897. }
  5898. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  5899. if (pipe_config->fdi_lanes > 2) {
  5900. DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
  5901. pipe_config->fdi_lanes);
  5902. return -EINVAL;
  5903. } else {
  5904. return 0;
  5905. }
  5906. }
  5907. if (INTEL_INFO(dev_priv)->num_pipes == 2)
  5908. return 0;
  5909. /* Ivybridge 3 pipe is really complicated */
  5910. switch (pipe) {
  5911. case PIPE_A:
  5912. return 0;
  5913. case PIPE_B:
  5914. if (pipe_config->fdi_lanes <= 2)
  5915. return 0;
  5916. other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
  5917. other_crtc_state =
  5918. intel_atomic_get_crtc_state(state, other_crtc);
  5919. if (IS_ERR(other_crtc_state))
  5920. return PTR_ERR(other_crtc_state);
  5921. if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
  5922. DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
  5923. pipe_name(pipe), pipe_config->fdi_lanes);
  5924. return -EINVAL;
  5925. }
  5926. return 0;
  5927. case PIPE_C:
  5928. if (pipe_config->fdi_lanes > 2) {
  5929. DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
  5930. pipe_name(pipe), pipe_config->fdi_lanes);
  5931. return -EINVAL;
  5932. }
  5933. other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
  5934. other_crtc_state =
  5935. intel_atomic_get_crtc_state(state, other_crtc);
  5936. if (IS_ERR(other_crtc_state))
  5937. return PTR_ERR(other_crtc_state);
  5938. if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
  5939. DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
  5940. return -EINVAL;
  5941. }
  5942. return 0;
  5943. default:
  5944. BUG();
  5945. }
  5946. }
  5947. #define RETRY 1
  5948. static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
  5949. struct intel_crtc_state *pipe_config)
  5950. {
  5951. struct drm_device *dev = intel_crtc->base.dev;
  5952. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  5953. int lane, link_bw, fdi_dotclock, ret;
  5954. bool needs_recompute = false;
  5955. retry:
  5956. /* FDI is a binary signal running at ~2.7GHz, encoding
  5957. * each output octet as 10 bits. The actual frequency
  5958. * is stored as a divider into a 100MHz clock, and the
  5959. * mode pixel clock is stored in units of 1KHz.
  5960. * Hence the bw of each lane in terms of the mode signal
  5961. * is:
  5962. */
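/*
 * Rough example: a 154 MHz mode at 24 bpp needs about 154000 * 24 =
 * 3696000 kbit/s, and each FDI lane carries roughly 2700000 * 8 / 10 =
 * 2160000 kbit/s of payload, so two lanes are required.
 */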
  5963. link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
  5964. fdi_dotclock = adjusted_mode->crtc_clock;
  5965. lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
  5966. pipe_config->pipe_bpp);
  5967. pipe_config->fdi_lanes = lane;
  5968. intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
  5969. link_bw, &pipe_config->fdi_m_n);
  5970. ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
  5971. if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
  5972. pipe_config->pipe_bpp -= 2*3;
  5973. DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
  5974. pipe_config->pipe_bpp);
  5975. needs_recompute = true;
  5976. pipe_config->bw_constrained = true;
  5977. goto retry;
  5978. }
  5979. if (needs_recompute)
  5980. return RETRY;
  5981. return ret;
  5982. }
  5983. static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
  5984. struct intel_crtc_state *pipe_config)
  5985. {
  5986. if (pipe_config->pipe_bpp > 24)
  5987. return false;
  5988. /* HSW can handle pixel rate up to cdclk? */
  5989. if (IS_HASWELL(dev_priv))
  5990. return true;
  5991. /*
  5992. * We compare against max which means we must take
  5993. * the increased cdclk requirement into account when
  5994. * calculating the new cdclk.
  5995. *
  5996. * Should measure whether using a lower cdclk w/o IPS would be preferable.
  5997. */
  5998. return ilk_pipe_pixel_rate(pipe_config) <=
  5999. dev_priv->max_cdclk_freq * 95 / 100;
  6000. }
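/*
 * E.g. with a 675 MHz max cdclk, IPS is allowed for pixel rates up to
 * 675000 * 95 / 100 = 641250 kHz on BDW.
 */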
  6001. static void hsw_compute_ips_config(struct intel_crtc *crtc,
  6002. struct intel_crtc_state *pipe_config)
  6003. {
  6004. struct drm_device *dev = crtc->base.dev;
  6005. struct drm_i915_private *dev_priv = to_i915(dev);
  6006. pipe_config->ips_enabled = i915.enable_ips &&
  6007. hsw_crtc_supports_ips(crtc) &&
  6008. pipe_config_supports_ips(dev_priv, pipe_config);
  6009. }
  6010. static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
  6011. {
  6012. const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6013. /* GDG double wide on either pipe, otherwise pipe A only */
  6014. return INTEL_INFO(dev_priv)->gen < 4 &&
  6015. (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
  6016. }
  6017. static int intel_crtc_compute_config(struct intel_crtc *crtc,
  6018. struct intel_crtc_state *pipe_config)
  6019. {
  6020. struct drm_device *dev = crtc->base.dev;
  6021. struct drm_i915_private *dev_priv = to_i915(dev);
  6022. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  6023. int clock_limit = dev_priv->max_dotclk_freq;
  6024. if (INTEL_GEN(dev_priv) < 4) {
  6025. clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
  6026. /*
  6027. * Enable double wide mode when the dot clock
  6028. * is > 90% of the (display) core speed.
  6029. */
  6030. if (intel_crtc_supports_double_wide(crtc) &&
  6031. adjusted_mode->crtc_clock > clock_limit) {
  6032. clock_limit = dev_priv->max_dotclk_freq;
  6033. pipe_config->double_wide = true;
  6034. }
  6035. }
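/*
 * E.g. with a 320 MHz cdclk the single wide limit is 320000 * 9 / 10 =
 * 288000 kHz; a faster mode switches to double wide and is then only
 * limited by max_dotclk_freq.
 */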
  6036. if (adjusted_mode->crtc_clock > clock_limit) {
  6037. DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
  6038. adjusted_mode->crtc_clock, clock_limit,
  6039. yesno(pipe_config->double_wide));
  6040. return -EINVAL;
  6041. }
  6042. /*
  6043. * Pipe horizontal size must be even in:
  6044. * - DVO ganged mode
  6045. * - LVDS dual channel mode
  6046. * - Double wide pipe
  6047. */
  6048. if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
  6049. intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
  6050. pipe_config->pipe_src_w &= ~1;
  6051. /* Cantiga+ cannot handle modes with a hsync front porch of 0.
  6052. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
  6053. */
  6054. if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
  6055. adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
  6056. return -EINVAL;
  6057. if (HAS_IPS(dev_priv))
  6058. hsw_compute_ips_config(crtc, pipe_config);
  6059. if (pipe_config->has_pch_encoder)
  6060. return ironlake_fdi_compute_config(crtc, pipe_config);
  6061. return 0;
  6062. }
  6063. static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6064. {
  6065. u32 cdctl;
  6066. skl_dpll0_update(dev_priv);
  6067. if (dev_priv->cdclk_pll.vco == 0)
  6068. return dev_priv->cdclk_pll.ref;
  6069. cdctl = I915_READ(CDCLK_CTL);
  6070. if (dev_priv->cdclk_pll.vco == 8640000) {
  6071. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  6072. case CDCLK_FREQ_450_432:
  6073. return 432000;
  6074. case CDCLK_FREQ_337_308:
  6075. return 308571;
  6076. case CDCLK_FREQ_540:
  6077. return 540000;
  6078. case CDCLK_FREQ_675_617:
  6079. return 617143;
  6080. default:
  6081. MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
  6082. }
  6083. } else {
  6084. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  6085. case CDCLK_FREQ_450_432:
  6086. return 450000;
  6087. case CDCLK_FREQ_337_308:
  6088. return 337500;
  6089. case CDCLK_FREQ_540:
  6090. return 540000;
  6091. case CDCLK_FREQ_675_617:
  6092. return 675000;
  6093. default:
  6094. MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
  6095. }
  6096. }
  6097. return dev_priv->cdclk_pll.ref;
  6098. }
  6099. static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
  6100. {
  6101. u32 val;
  6102. dev_priv->cdclk_pll.ref = 19200;
  6103. dev_priv->cdclk_pll.vco = 0;
  6104. val = I915_READ(BXT_DE_PLL_ENABLE);
  6105. if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
  6106. return;
  6107. if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
  6108. return;
  6109. val = I915_READ(BXT_DE_PLL_CTL);
  6110. dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
  6111. dev_priv->cdclk_pll.ref;
  6112. }
  6113. static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6114. {
  6115. u32 divider;
  6116. int div, vco;
  6117. bxt_de_pll_update(dev_priv);
  6118. vco = dev_priv->cdclk_pll.vco;
  6119. if (vco == 0)
  6120. return dev_priv->cdclk_pll.ref;
  6121. divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
  6122. switch (divider) {
  6123. case BXT_CDCLK_CD2X_DIV_SEL_1:
  6124. div = 2;
  6125. break;
  6126. case BXT_CDCLK_CD2X_DIV_SEL_1_5:
  6127. div = 3;
  6128. break;
  6129. case BXT_CDCLK_CD2X_DIV_SEL_2:
  6130. div = 4;
  6131. break;
  6132. case BXT_CDCLK_CD2X_DIV_SEL_4:
  6133. div = 8;
  6134. break;
  6135. default:
  6136. MISSING_CASE(divider);
  6137. return dev_priv->cdclk_pll.ref;
  6138. }
  6139. return DIV_ROUND_CLOSEST(vco, div);
  6140. }
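/*
 * The CD2X divider field selects /1, /1.5, /2 or /4 of the 2x CD clock
 * (i.e. vco / 2), which is why the overall dividers above are 2, 3, 4, 8.
 */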
  6141. static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6142. {
  6143. uint32_t lcpll = I915_READ(LCPLL_CTL);
  6144. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  6145. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  6146. return 800000;
  6147. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  6148. return 450000;
  6149. else if (freq == LCPLL_CLK_FREQ_450)
  6150. return 450000;
  6151. else if (freq == LCPLL_CLK_FREQ_54O_BDW)
  6152. return 540000;
  6153. else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
  6154. return 337500;
  6155. else
  6156. return 675000;
  6157. }
  6158. static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6159. {
  6160. uint32_t lcpll = I915_READ(LCPLL_CTL);
  6161. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  6162. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  6163. return 800000;
  6164. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  6165. return 450000;
  6166. else if (freq == LCPLL_CLK_FREQ_450)
  6167. return 450000;
  6168. else if (IS_HSW_ULT(dev_priv))
  6169. return 337500;
  6170. else
  6171. return 540000;
  6172. }
  6173. static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6174. {
  6175. return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
  6176. CCK_DISPLAY_CLOCK_CONTROL);
  6177. }
  6178. static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6179. {
  6180. return 450000;
  6181. }
  6182. static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6183. {
  6184. return 400000;
  6185. }
  6186. static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6187. {
  6188. return 333333;
  6189. }
  6190. static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6191. {
  6192. return 200000;
  6193. }
  6194. static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6195. {
  6196. struct pci_dev *pdev = dev_priv->drm.pdev;
  6197. u16 gcfgc = 0;
  6198. pci_read_config_word(pdev, GCFGC, &gcfgc);
  6199. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  6200. case GC_DISPLAY_CLOCK_267_MHZ_PNV:
  6201. return 266667;
  6202. case GC_DISPLAY_CLOCK_333_MHZ_PNV:
  6203. return 333333;
  6204. case GC_DISPLAY_CLOCK_444_MHZ_PNV:
  6205. return 444444;
  6206. case GC_DISPLAY_CLOCK_200_MHZ_PNV:
  6207. return 200000;
  6208. default:
  6209. DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
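/* fall through - unknown values default to 133 MHz */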
  6210. case GC_DISPLAY_CLOCK_133_MHZ_PNV:
  6211. return 133333;
  6212. case GC_DISPLAY_CLOCK_167_MHZ_PNV:
  6213. return 166667;
  6214. }
  6215. }
  6216. static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6217. {
  6218. struct pci_dev *pdev = dev_priv->drm.pdev;
  6219. u16 gcfgc = 0;
  6220. pci_read_config_word(pdev, GCFGC, &gcfgc);
  6221. if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
  6222. return 133333;
  6223. else {
  6224. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  6225. case GC_DISPLAY_CLOCK_333_MHZ:
  6226. return 333333;
  6227. default:
  6228. case GC_DISPLAY_CLOCK_190_200_MHZ:
  6229. return 190000;
  6230. }
  6231. }
  6232. }
  6233. static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6234. {
  6235. return 266667;
  6236. }
  6237. static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6238. {
  6239. struct pci_dev *pdev = dev_priv->drm.pdev;
  6240. u16 hpllcc = 0;
  6241. /*
  6242. * 852GM/852GMV only support 133 MHz and the HPLLCC
  6243. * encoding is different :(
  6244. * FIXME is this the right way to detect 852GM/852GMV?
  6245. */
  6246. if (pdev->revision == 0x1)
  6247. return 133333;
  6248. pci_bus_read_config_word(pdev->bus,
  6249. PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
  6250. /* Assume that the hardware is in the high speed state. This
  6251. * should be the default.
  6252. */
  6253. switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
  6254. case GC_CLOCK_133_200:
  6255. case GC_CLOCK_133_200_2:
  6256. case GC_CLOCK_100_200:
  6257. return 200000;
  6258. case GC_CLOCK_166_250:
  6259. return 250000;
  6260. case GC_CLOCK_100_133:
  6261. return 133333;
  6262. case GC_CLOCK_133_266:
  6263. case GC_CLOCK_133_266_2:
  6264. case GC_CLOCK_166_266:
  6265. return 266667;
  6266. }
  6267. /* Shouldn't happen */
  6268. return 0;
  6269. }
  6270. static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6271. {
  6272. return 133333;
  6273. }
  6274. static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
  6275. {
  6276. static const unsigned int blb_vco[8] = {
  6277. [0] = 3200000,
  6278. [1] = 4000000,
  6279. [2] = 5333333,
  6280. [3] = 4800000,
  6281. [4] = 6400000,
  6282. };
  6283. static const unsigned int pnv_vco[8] = {
  6284. [0] = 3200000,
  6285. [1] = 4000000,
  6286. [2] = 5333333,
  6287. [3] = 4800000,
  6288. [4] = 2666667,
  6289. };
  6290. static const unsigned int cl_vco[8] = {
  6291. [0] = 3200000,
  6292. [1] = 4000000,
  6293. [2] = 5333333,
  6294. [3] = 6400000,
  6295. [4] = 3333333,
  6296. [5] = 3566667,
  6297. [6] = 4266667,
  6298. };
  6299. static const unsigned int elk_vco[8] = {
  6300. [0] = 3200000,
  6301. [1] = 4000000,
  6302. [2] = 5333333,
  6303. [3] = 4800000,
  6304. };
  6305. static const unsigned int ctg_vco[8] = {
  6306. [0] = 3200000,
  6307. [1] = 4000000,
  6308. [2] = 5333333,
  6309. [3] = 6400000,
  6310. [4] = 2666667,
  6311. [5] = 4266667,
  6312. };
  6313. const unsigned int *vco_table;
  6314. unsigned int vco;
  6315. uint8_t tmp = 0;
  6316. /* FIXME other chipsets? */
  6317. if (IS_GM45(dev_priv))
  6318. vco_table = ctg_vco;
  6319. else if (IS_G4X(dev_priv))
  6320. vco_table = elk_vco;
  6321. else if (IS_CRESTLINE(dev_priv))
  6322. vco_table = cl_vco;
  6323. else if (IS_PINEVIEW(dev_priv))
  6324. vco_table = pnv_vco;
  6325. else if (IS_G33(dev_priv))
  6326. vco_table = blb_vco;
  6327. else
  6328. return 0;
  6329. tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
  6330. vco = vco_table[tmp & 0x7];
  6331. if (vco == 0)
  6332. DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
  6333. else
  6334. DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
  6335. return vco;
  6336. }
  6337. static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6338. {
  6339. struct pci_dev *pdev = dev_priv->drm.pdev;
  6340. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6341. uint16_t tmp = 0;
  6342. pci_read_config_word(pdev, GCFGC, &tmp);
  6343. cdclk_sel = (tmp >> 12) & 0x1;
  6344. switch (vco) {
  6345. case 2666667:
  6346. case 4000000:
  6347. case 5333333:
  6348. return cdclk_sel ? 333333 : 222222;
  6349. case 3200000:
  6350. return cdclk_sel ? 320000 : 228571;
  6351. default:
  6352. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
  6353. return 222222;
  6354. }
  6355. }
  6356. static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6357. {
  6358. struct pci_dev *pdev = dev_priv->drm.pdev;
  6359. static const uint8_t div_3200[] = { 16, 10, 8 };
  6360. static const uint8_t div_4000[] = { 20, 12, 10 };
  6361. static const uint8_t div_5333[] = { 24, 16, 14 };
  6362. const uint8_t *div_table;
  6363. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6364. uint16_t tmp = 0;
  6365. pci_read_config_word(pdev, GCFGC, &tmp);
  6366. cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
  6367. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  6368. goto fail;
  6369. switch (vco) {
  6370. case 3200000:
  6371. div_table = div_3200;
  6372. break;
  6373. case 4000000:
  6374. div_table = div_4000;
  6375. break;
  6376. case 5333333:
  6377. div_table = div_5333;
  6378. break;
  6379. default:
  6380. goto fail;
  6381. }
  6382. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  6383. fail:
  6384. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
  6385. return 200000;
  6386. }
  6387. static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6388. {
  6389. struct pci_dev *pdev = dev_priv->drm.pdev;
  6390. static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
  6391. static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
  6392. static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
  6393. static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
  6394. const uint8_t *div_table;
  6395. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6396. uint16_t tmp = 0;
  6397. pci_read_config_word(pdev, GCFGC, &tmp);
  6398. cdclk_sel = (tmp >> 4) & 0x7;
  6399. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  6400. goto fail;
  6401. switch (vco) {
  6402. case 3200000:
  6403. div_table = div_3200;
  6404. break;
  6405. case 4000000:
  6406. div_table = div_4000;
  6407. break;
  6408. case 4800000:
  6409. div_table = div_4800;
  6410. break;
  6411. case 5333333:
  6412. div_table = div_5333;
  6413. break;
  6414. default:
  6415. goto fail;
  6416. }
  6417. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  6418. fail:
  6419. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
  6420. return 190476;
  6421. }
  6422. static void
  6423. intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
  6424. {
  6425. while (*num > DATA_LINK_M_N_MASK ||
  6426. *den > DATA_LINK_M_N_MASK) {
  6427. *num >>= 1;
  6428. *den >>= 1;
  6429. }
  6430. }
  6431. static void compute_m_n(unsigned int m, unsigned int n,
  6432. uint32_t *ret_m, uint32_t *ret_n)
  6433. {
  6434. *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
  6435. *ret_m = div_u64((uint64_t) m * *ret_n, n);
  6436. intel_reduce_m_n_ratio(ret_m, ret_n);
  6437. }
  6438. void
  6439. intel_link_compute_m_n(int bits_per_pixel, int nlanes,
  6440. int pixel_clock, int link_clock,
  6441. struct intel_link_m_n *m_n)
  6442. {
  6443. m_n->tu = 64;
  6444. compute_m_n(bits_per_pixel * pixel_clock,
  6445. link_clock * nlanes * 8,
  6446. &m_n->gmch_m, &m_n->gmch_n);
  6447. compute_m_n(pixel_clock, link_clock,
  6448. &m_n->link_m, &m_n->link_n);
  6449. }
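/*
 * Worked example: 24 bpp over 4 lanes at a 270000 kHz link clock with a
 * 148500 kHz pixel clock gives a data ratio of 24 * 148500 / (270000 * 4 * 8)
 * = 0.4125 and a link ratio of 148500 / 270000 = 0.55; compute_m_n()
 * encodes each ratio as an m/n pair with n rounded up to a power of two
 * (capped at DATA_LINK_N_MAX) and m scaled to match.
 */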
  6450. static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  6451. {
  6452. if (i915.panel_use_ssc >= 0)
  6453. return i915.panel_use_ssc != 0;
  6454. return dev_priv->vbt.lvds_use_ssc
  6455. && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  6456. }
  6457. static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
  6458. {
  6459. return (1 << dpll->n) << 16 | dpll->m2;
  6460. }
  6461. static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
  6462. {
  6463. return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
  6464. }
  6465. static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
  6466. struct intel_crtc_state *crtc_state,
  6467. struct dpll *reduced_clock)
  6468. {
  6469. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6470. u32 fp, fp2 = 0;
  6471. if (IS_PINEVIEW(dev_priv)) {
  6472. fp = pnv_dpll_compute_fp(&crtc_state->dpll);
  6473. if (reduced_clock)
  6474. fp2 = pnv_dpll_compute_fp(reduced_clock);
  6475. } else {
  6476. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  6477. if (reduced_clock)
  6478. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  6479. }
  6480. crtc_state->dpll_hw_state.fp0 = fp;
  6481. crtc->lowfreq_avail = false;
  6482. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6483. reduced_clock) {
  6484. crtc_state->dpll_hw_state.fp1 = fp2;
  6485. crtc->lowfreq_avail = true;
  6486. } else {
  6487. crtc_state->dpll_hw_state.fp1 = fp;
  6488. }
  6489. }
  6490. static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
  6491. pipe)
  6492. {
  6493. u32 reg_val;
  6494. /*
  6495. * PLLB opamp always calibrates to max value of 0x3f, force enable it
  6496. * and set it to a reasonable value instead.
  6497. */
  6498. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  6499. reg_val &= 0xffffff00;
  6500. reg_val |= 0x00000030;
  6501. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  6502. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
  6503. reg_val &= 0x8cffffff;
  6504. reg_val = 0x8c000000;
  6505. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  6506. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  6507. reg_val &= 0xffffff00;
  6508. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  6509. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
  6510. reg_val &= 0x00ffffff;
  6511. reg_val |= 0xb0000000;
  6512. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  6513. }
  6514. static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
  6515. struct intel_link_m_n *m_n)
  6516. {
  6517. struct drm_device *dev = crtc->base.dev;
  6518. struct drm_i915_private *dev_priv = to_i915(dev);
  6519. int pipe = crtc->pipe;
  6520. I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6521. I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
  6522. I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
  6523. I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  6524. }
  6525. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  6526. struct intel_link_m_n *m_n,
  6527. struct intel_link_m_n *m2_n2)
  6528. {
  6529. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6530. int pipe = crtc->pipe;
  6531. enum transcoder transcoder = crtc->config->cpu_transcoder;
  6532. if (INTEL_GEN(dev_priv) >= 5) {
  6533. I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6534. I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  6535. I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
  6536. I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
  6537. /* The M2_N2 registers only exist on gen < 8 (and CHV); program them
  6538. * only when DRRS is supported, so that the registers are not
  6539. * unnecessarily accessed.
  6540. */
  6541. if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
  6542. INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
  6543. I915_WRITE(PIPE_DATA_M2(transcoder),
  6544. TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  6545. I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
  6546. I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
  6547. I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
  6548. }
  6549. } else {
  6550. I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6551. I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
  6552. I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
  6553. I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
  6554. }
  6555. }
  6556. void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
  6557. {
  6558. struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  6559. if (m_n == M1_N1) {
  6560. dp_m_n = &crtc->config->dp_m_n;
  6561. dp_m2_n2 = &crtc->config->dp_m2_n2;
  6562. } else if (m_n == M2_N2) {
  6563. /*
  6564. * M2_N2 registers are not supported here, so the m2_n2 divider
  6565. * value needs to be programmed into M1_N1 instead.
  6566. */
  6567. dp_m_n = &crtc->config->dp_m2_n2;
  6568. } else {
  6569. DRM_ERROR("Unsupported divider value\n");
  6570. return;
  6571. }
  6572. if (crtc->config->has_pch_encoder)
  6573. intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
  6574. else
  6575. intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
  6576. }
  6577. static void vlv_compute_dpll(struct intel_crtc *crtc,
  6578. struct intel_crtc_state *pipe_config)
  6579. {
  6580. pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
  6581. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6582. if (crtc->pipe != PIPE_A)
  6583. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6584. /* DPLL not used with DSI, but still need the rest set up */
  6585. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6586. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
  6587. DPLL_EXT_BUFFER_ENABLE_VLV;
  6588. pipe_config->dpll_hw_state.dpll_md =
  6589. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6590. }
  6591. static void chv_compute_dpll(struct intel_crtc *crtc,
  6592. struct intel_crtc_state *pipe_config)
  6593. {
  6594. pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
  6595. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6596. if (crtc->pipe != PIPE_A)
  6597. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6598. /* DPLL not used with DSI, but still need the rest set up */
  6599. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6600. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
  6601. pipe_config->dpll_hw_state.dpll_md =
  6602. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6603. }
  6604. static void vlv_prepare_pll(struct intel_crtc *crtc,
  6605. const struct intel_crtc_state *pipe_config)
  6606. {
  6607. struct drm_device *dev = crtc->base.dev;
  6608. struct drm_i915_private *dev_priv = to_i915(dev);
  6609. enum pipe pipe = crtc->pipe;
  6610. u32 mdiv;
  6611. u32 bestn, bestm1, bestm2, bestp1, bestp2;
  6612. u32 coreclk, reg_val;
  6613. /* Enable Refclk */
  6614. I915_WRITE(DPLL(pipe),
  6615. pipe_config->dpll_hw_state.dpll &
  6616. ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
  6617. /* No need to actually set up the DPLL with DSI */
  6618. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6619. return;
  6620. mutex_lock(&dev_priv->sb_lock);
  6621. bestn = pipe_config->dpll.n;
  6622. bestm1 = pipe_config->dpll.m1;
  6623. bestm2 = pipe_config->dpll.m2;
  6624. bestp1 = pipe_config->dpll.p1;
  6625. bestp2 = pipe_config->dpll.p2;
  6626. /* See eDP HDMI DPIO driver vbios notes doc */
  6627. /* PLL B needs special handling */
  6628. if (pipe == PIPE_B)
  6629. vlv_pllb_recal_opamp(dev_priv, pipe);
  6630. /* Set up Tx target for periodic Rcomp update */
  6631. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  6632. /* Disable target IRef on PLL */
  6633. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
  6634. reg_val &= 0x00ffffff;
  6635. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  6636. /* Disable fast lock */
  6637. vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  6638. /* Set idtafcrecal before PLL is enabled */
  6639. mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
  6640. mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
  6641. mdiv |= ((bestn << DPIO_N_SHIFT));
  6642. mdiv |= (1 << DPIO_K_SHIFT);
  6643. /*
  6644. * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
  6645. * but we don't support that).
  6646. * Note: don't use the DAC post divider as it seems unstable.
  6647. */
  6648. mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
  6649. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6650. mdiv |= DPIO_ENABLE_CALIBRATION;
  6651. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6652. /* Set HBR and RBR LPF coefficients */
  6653. if (pipe_config->port_clock == 162000 ||
  6654. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
  6655. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
  6656. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6657. 0x009f0003);
  6658. else
  6659. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6660. 0x00d0000f);
  6661. if (intel_crtc_has_dp_encoder(pipe_config)) {
  6662. /* Use SSC source */
  6663. if (pipe == PIPE_A)
  6664. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6665. 0x0df40000);
  6666. else
  6667. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6668. 0x0df70000);
  6669. } else { /* HDMI or VGA */
  6670. /* Use bend source */
  6671. if (pipe == PIPE_A)
  6672. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6673. 0x0df70000);
  6674. else
  6675. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6676. 0x0df40000);
  6677. }
  6678. coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
  6679. coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  6680. if (intel_crtc_has_dp_encoder(crtc->config))
  6681. coreclk |= 0x01000000;
  6682. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  6683. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  6684. mutex_unlock(&dev_priv->sb_lock);
  6685. }
  6686. static void chv_prepare_pll(struct intel_crtc *crtc,
  6687. const struct intel_crtc_state *pipe_config)
  6688. {
  6689. struct drm_device *dev = crtc->base.dev;
  6690. struct drm_i915_private *dev_priv = to_i915(dev);
  6691. enum pipe pipe = crtc->pipe;
  6692. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  6693. u32 loopfilter, tribuf_calcntr;
  6694. u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
  6695. u32 dpio_val;
  6696. int vco;
  6697. /* Enable Refclk and SSC */
  6698. I915_WRITE(DPLL(pipe),
  6699. pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
  6700. /* No need to actually set up the DPLL with DSI */
  6701. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6702. return;
  6703. bestn = pipe_config->dpll.n;
  6704. bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
  6705. bestm1 = pipe_config->dpll.m1;
  6706. bestm2 = pipe_config->dpll.m2 >> 22;
  6707. bestp1 = pipe_config->dpll.p1;
  6708. bestp2 = pipe_config->dpll.p2;
  6709. vco = pipe_config->dpll.vco;
  6710. dpio_val = 0;
  6711. loopfilter = 0;
  6712. mutex_lock(&dev_priv->sb_lock);
  6713. /* p1 and p2 divider */
  6714. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
  6715. 5 << DPIO_CHV_S1_DIV_SHIFT |
  6716. bestp1 << DPIO_CHV_P1_DIV_SHIFT |
  6717. bestp2 << DPIO_CHV_P2_DIV_SHIFT |
  6718. 1 << DPIO_CHV_K_DIV_SHIFT);
  6719. /* Feedback post-divider - m2 */
  6720. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
  6721. /* Feedback refclk divider - n and m1 */
  6722. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
  6723. DPIO_CHV_M1_DIV_BY_2 |
  6724. 1 << DPIO_CHV_N_DIV_SHIFT);
  6725. /* M2 fraction division */
  6726. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
  6727. /* M2 fraction division enable */
  6728. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  6729. dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
  6730. dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
  6731. if (bestm2_frac)
  6732. dpio_val |= DPIO_CHV_FRAC_DIV_EN;
  6733. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
  6734. /* Program digital lock detect threshold */
  6735. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
  6736. dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
  6737. DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
  6738. dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
  6739. if (!bestm2_frac)
  6740. dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
  6741. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
  6742. /* Loop filter */
  6743. if (vco == 5400000) {
  6744. loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
  6745. loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
  6746. loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6747. tribuf_calcntr = 0x9;
  6748. } else if (vco <= 6200000) {
  6749. loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
  6750. loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
  6751. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6752. tribuf_calcntr = 0x9;
  6753. } else if (vco <= 6480000) {
  6754. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6755. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6756. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6757. tribuf_calcntr = 0x8;
  6758. } else {
  6759. /* Not supported. Apply the same limits as in the max case */
  6760. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6761. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6762. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6763. tribuf_calcntr = 0;
  6764. }
  6765. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
  6766. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
  6767. dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
  6768. dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
  6769. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
  6770. /* AFC Recal */
  6771. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
  6772. vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
  6773. DPIO_AFC_RECAL);
  6774. mutex_unlock(&dev_priv->sb_lock);
  6775. }
  6776. /**
  6777. * vlv_force_pll_on - forcibly enable just the PLL
  6778. * @dev_priv: i915 private structure
  6779. * @pipe: pipe PLL to enable
  6780. * @dpll: PLL configuration
  6781. *
  6782. * Enable the PLL for @pipe using the supplied @dpll config. To be used
  6783. * in cases where we need the PLL enabled even when @pipe is not going to
  6784. * be enabled.
  6785. */
  6786. int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
  6787. const struct dpll *dpll)
  6788. {
  6789. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  6790. struct intel_crtc_state *pipe_config;
  6791. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  6792. if (!pipe_config)
  6793. return -ENOMEM;
  6794. pipe_config->base.crtc = &crtc->base;
  6795. pipe_config->pixel_multiplier = 1;
  6796. pipe_config->dpll = *dpll;
  6797. if (IS_CHERRYVIEW(dev_priv)) {
  6798. chv_compute_dpll(crtc, pipe_config);
  6799. chv_prepare_pll(crtc, pipe_config);
  6800. chv_enable_pll(crtc, pipe_config);
  6801. } else {
  6802. vlv_compute_dpll(crtc, pipe_config);
  6803. vlv_prepare_pll(crtc, pipe_config);
  6804. vlv_enable_pll(crtc, pipe_config);
  6805. }
  6806. kfree(pipe_config);
  6807. return 0;
  6808. }
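/*
 * Usage sketch (illustrative only, not copied from a real caller):
 * force a pipe's PLL on around some work that needs it running, then
 * drop it again. "cfg" here is a hypothetical, pre-computed divider
 * configuration:
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_B, &cfg) == 0) {
 *		... work that needs the PLL running ...
 *		vlv_force_pll_off(dev_priv, PIPE_B);
 *	}
 */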
  6809. /**
  6810. * vlv_force_pll_off - forcibly disable just the PLL
  6811. * @dev_priv: i915 private structure
  6812. * @pipe: pipe PLL to disable
  6813. *
6814. * Disable the PLL for @pipe. To be used in cases where the PLL was
6815. * previously force-enabled with vlv_force_pll_on() and is no longer needed.
  6816. */
  6817. void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
  6818. {
  6819. if (IS_CHERRYVIEW(dev_priv))
  6820. chv_disable_pll(dev_priv, pipe);
  6821. else
  6822. vlv_disable_pll(dev_priv, pipe);
  6823. }
  6824. static void i9xx_compute_dpll(struct intel_crtc *crtc,
  6825. struct intel_crtc_state *crtc_state,
  6826. struct dpll *reduced_clock)
  6827. {
  6828. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6829. u32 dpll;
  6830. struct dpll *clock = &crtc_state->dpll;
  6831. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6832. dpll = DPLL_VGA_MODE_DIS;
  6833. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
  6834. dpll |= DPLLB_MODE_LVDS;
  6835. else
  6836. dpll |= DPLLB_MODE_DAC_SERIAL;
  6837. if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
  6838. dpll |= (crtc_state->pixel_multiplier - 1)
  6839. << SDVO_MULTIPLIER_SHIFT_HIRES;
  6840. }
  6841. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
  6842. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
  6843. dpll |= DPLL_SDVO_HIGH_SPEED;
  6844. if (intel_crtc_has_dp_encoder(crtc_state))
  6845. dpll |= DPLL_SDVO_HIGH_SPEED;
  6846. /* compute bitmask from p1 value */
  6847. if (IS_PINEVIEW(dev_priv))
  6848. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
  6849. else {
  6850. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6851. if (IS_G4X(dev_priv) && reduced_clock)
  6852. dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  6853. }
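/*
 * The P1 field is a one-hot bitmask rather than a plain binary value:
 * bit (p1 - 1) is set. For example p1 == 3 becomes 0b100 in the P1
 * field, i.e. (1 << 2) << DPLL_FPA01_P1_POST_DIV_SHIFT.
 */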
  6854. switch (clock->p2) {
  6855. case 5:
  6856. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  6857. break;
  6858. case 7:
  6859. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  6860. break;
  6861. case 10:
  6862. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  6863. break;
  6864. case 14:
  6865. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  6866. break;
  6867. }
  6868. if (INTEL_GEN(dev_priv) >= 4)
  6869. dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
  6870. if (crtc_state->sdvo_tv_clock)
  6871. dpll |= PLL_REF_INPUT_TVCLKINBC;
  6872. else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6873. intel_panel_use_ssc(dev_priv))
  6874. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6875. else
  6876. dpll |= PLL_REF_INPUT_DREFCLK;
  6877. dpll |= DPLL_VCO_ENABLE;
  6878. crtc_state->dpll_hw_state.dpll = dpll;
  6879. if (INTEL_GEN(dev_priv) >= 4) {
  6880. u32 dpll_md = (crtc_state->pixel_multiplier - 1)
  6881. << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6882. crtc_state->dpll_hw_state.dpll_md = dpll_md;
  6883. }
  6884. }
  6885. static void i8xx_compute_dpll(struct intel_crtc *crtc,
  6886. struct intel_crtc_state *crtc_state,
  6887. struct dpll *reduced_clock)
  6888. {
  6889. struct drm_device *dev = crtc->base.dev;
  6890. struct drm_i915_private *dev_priv = to_i915(dev);
  6891. u32 dpll;
  6892. struct dpll *clock = &crtc_state->dpll;
  6893. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6894. dpll = DPLL_VGA_MODE_DIS;
  6895. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6896. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6897. } else {
  6898. if (clock->p1 == 2)
  6899. dpll |= PLL_P1_DIVIDE_BY_TWO;
  6900. else
  6901. dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6902. if (clock->p2 == 4)
  6903. dpll |= PLL_P2_DIVIDE_BY_4;
  6904. }
  6905. if (!IS_I830(dev_priv) &&
  6906. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
  6907. dpll |= DPLL_DVO_2X_MODE;
  6908. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6909. intel_panel_use_ssc(dev_priv))
  6910. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6911. else
  6912. dpll |= PLL_REF_INPUT_DREFCLK;
  6913. dpll |= DPLL_VCO_ENABLE;
  6914. crtc_state->dpll_hw_state.dpll = dpll;
  6915. }
  6916. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
  6917. {
  6918. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  6919. enum pipe pipe = intel_crtc->pipe;
  6920. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  6921. const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
  6922. uint32_t crtc_vtotal, crtc_vblank_end;
  6923. int vsyncshift = 0;
6924. /* We need to be careful not to change the adjusted mode, for otherwise
  6925. * the hw state checker will get angry at the mismatch. */
  6926. crtc_vtotal = adjusted_mode->crtc_vtotal;
  6927. crtc_vblank_end = adjusted_mode->crtc_vblank_end;
  6928. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
  6929. /* the chip adds 2 halflines automatically */
  6930. crtc_vtotal -= 1;
  6931. crtc_vblank_end -= 1;
  6932. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  6933. vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
  6934. else
  6935. vsyncshift = adjusted_mode->crtc_hsync_start -
  6936. adjusted_mode->crtc_htotal / 2;
  6937. if (vsyncshift < 0)
  6938. vsyncshift += adjusted_mode->crtc_htotal;
  6939. }
  6940. if (INTEL_GEN(dev_priv) > 3)
  6941. I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
  6942. I915_WRITE(HTOTAL(cpu_transcoder),
  6943. (adjusted_mode->crtc_hdisplay - 1) |
  6944. ((adjusted_mode->crtc_htotal - 1) << 16));
  6945. I915_WRITE(HBLANK(cpu_transcoder),
  6946. (adjusted_mode->crtc_hblank_start - 1) |
  6947. ((adjusted_mode->crtc_hblank_end - 1) << 16));
  6948. I915_WRITE(HSYNC(cpu_transcoder),
  6949. (adjusted_mode->crtc_hsync_start - 1) |
  6950. ((adjusted_mode->crtc_hsync_end - 1) << 16));
  6951. I915_WRITE(VTOTAL(cpu_transcoder),
  6952. (adjusted_mode->crtc_vdisplay - 1) |
  6953. ((crtc_vtotal - 1) << 16));
  6954. I915_WRITE(VBLANK(cpu_transcoder),
  6955. (adjusted_mode->crtc_vblank_start - 1) |
  6956. ((crtc_vblank_end - 1) << 16));
  6957. I915_WRITE(VSYNC(cpu_transcoder),
  6958. (adjusted_mode->crtc_vsync_start - 1) |
  6959. ((adjusted_mode->crtc_vsync_end - 1) << 16));
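/*
 * Worked example (illustrative): for a standard 1920x1080 mode with
 * htotal 2200, HTOTAL is programmed as (1920 - 1) | ((2200 - 1) << 16)
 * = 0x0897077f, i.e. the active size minus one in the low word and the
 * total minus one in the high word; the other timing registers above
 * follow the same "value minus one" packing.
 */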
  6960. /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
  6961. * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
  6962. * documented on the DDI_FUNC_CTL register description, EDP Input Select
  6963. * bits. */
  6964. if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
  6965. (pipe == PIPE_B || pipe == PIPE_C))
  6966. I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
  6967. }
  6968. static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
  6969. {
  6970. struct drm_device *dev = intel_crtc->base.dev;
  6971. struct drm_i915_private *dev_priv = to_i915(dev);
  6972. enum pipe pipe = intel_crtc->pipe;
  6973. /* pipesrc controls the size that is scaled from, which should
  6974. * always be the user's requested size.
  6975. */
  6976. I915_WRITE(PIPESRC(pipe),
  6977. ((intel_crtc->config->pipe_src_w - 1) << 16) |
  6978. (intel_crtc->config->pipe_src_h - 1));
  6979. }
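/*
 * Note (illustrative): unlike the timing registers above, PIPESRC puts
 * the width in the high word and the height in the low word, again
 * minus one; e.g. a 1920x1080 source becomes 0x077f0437.
 */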
  6980. static void intel_get_pipe_timings(struct intel_crtc *crtc,
  6981. struct intel_crtc_state *pipe_config)
  6982. {
  6983. struct drm_device *dev = crtc->base.dev;
  6984. struct drm_i915_private *dev_priv = to_i915(dev);
  6985. enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
  6986. uint32_t tmp;
  6987. tmp = I915_READ(HTOTAL(cpu_transcoder));
  6988. pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
  6989. pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
  6990. tmp = I915_READ(HBLANK(cpu_transcoder));
  6991. pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
  6992. pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
  6993. tmp = I915_READ(HSYNC(cpu_transcoder));
  6994. pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
  6995. pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
  6996. tmp = I915_READ(VTOTAL(cpu_transcoder));
  6997. pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
  6998. pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
  6999. tmp = I915_READ(VBLANK(cpu_transcoder));
  7000. pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
  7001. pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
  7002. tmp = I915_READ(VSYNC(cpu_transcoder));
  7003. pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
  7004. pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
  7005. if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
  7006. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
  7007. pipe_config->base.adjusted_mode.crtc_vtotal += 1;
  7008. pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
  7009. }
  7010. }
  7011. static void intel_get_pipe_src_size(struct intel_crtc *crtc,
  7012. struct intel_crtc_state *pipe_config)
  7013. {
  7014. struct drm_device *dev = crtc->base.dev;
  7015. struct drm_i915_private *dev_priv = to_i915(dev);
  7016. u32 tmp;
  7017. tmp = I915_READ(PIPESRC(crtc->pipe));
  7018. pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
  7019. pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
  7020. pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
  7021. pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
  7022. }
  7023. void intel_mode_from_pipe_config(struct drm_display_mode *mode,
  7024. struct intel_crtc_state *pipe_config)
  7025. {
  7026. mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
  7027. mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
  7028. mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
  7029. mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
  7030. mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
  7031. mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
  7032. mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
  7033. mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
  7034. mode->flags = pipe_config->base.adjusted_mode.flags;
  7035. mode->type = DRM_MODE_TYPE_DRIVER;
  7036. mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
  7037. mode->flags |= pipe_config->base.adjusted_mode.flags;
  7038. mode->hsync = drm_mode_hsync(mode);
  7039. mode->vrefresh = drm_mode_vrefresh(mode);
  7040. drm_mode_set_name(mode);
  7041. }
  7042. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
  7043. {
  7044. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  7045. uint32_t pipeconf;
  7046. pipeconf = 0;
  7047. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  7048. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  7049. pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
  7050. if (intel_crtc->config->double_wide)
  7051. pipeconf |= PIPECONF_DOUBLE_WIDE;
  7052. /* only g4x and later have fancy bpc/dither controls */
  7053. if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  7054. IS_CHERRYVIEW(dev_priv)) {
  7055. /* Bspec claims that we can't use dithering for 30bpp pipes. */
  7056. if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
  7057. pipeconf |= PIPECONF_DITHER_EN |
  7058. PIPECONF_DITHER_TYPE_SP;
  7059. switch (intel_crtc->config->pipe_bpp) {
  7060. case 18:
  7061. pipeconf |= PIPECONF_6BPC;
  7062. break;
  7063. case 24:
  7064. pipeconf |= PIPECONF_8BPC;
  7065. break;
  7066. case 30:
  7067. pipeconf |= PIPECONF_10BPC;
  7068. break;
  7069. default:
  7070. /* Case prevented by intel_choose_pipe_bpp_dither. */
  7071. BUG();
  7072. }
  7073. }
  7074. if (HAS_PIPE_CXSR(dev_priv)) {
  7075. if (intel_crtc->lowfreq_avail) {
  7076. DRM_DEBUG_KMS("enabling CxSR downclocking\n");
  7077. pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
  7078. } else {
  7079. DRM_DEBUG_KMS("disabling CxSR downclocking\n");
  7080. }
  7081. }
  7082. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
  7083. if (INTEL_GEN(dev_priv) < 4 ||
  7084. intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  7085. pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
  7086. else
  7087. pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
  7088. } else
  7089. pipeconf |= PIPECONF_PROGRESSIVE;
  7090. if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
  7091. intel_crtc->config->limited_color_range)
  7092. pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
  7093. I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
  7094. POSTING_READ(PIPECONF(intel_crtc->pipe));
  7095. }
  7096. static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  7097. struct intel_crtc_state *crtc_state)
  7098. {
  7099. struct drm_device *dev = crtc->base.dev;
  7100. struct drm_i915_private *dev_priv = to_i915(dev);
  7101. const struct intel_limit *limit;
  7102. int refclk = 48000;
  7103. memset(&crtc_state->dpll_hw_state, 0,
  7104. sizeof(crtc_state->dpll_hw_state));
  7105. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7106. if (intel_panel_use_ssc(dev_priv)) {
  7107. refclk = dev_priv->vbt.lvds_ssc_freq;
  7108. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7109. }
  7110. limit = &intel_limits_i8xx_lvds;
  7111. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
  7112. limit = &intel_limits_i8xx_dvo;
  7113. } else {
  7114. limit = &intel_limits_i8xx_dac;
  7115. }
  7116. if (!crtc_state->clock_set &&
  7117. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7118. refclk, NULL, &crtc_state->dpll)) {
  7119. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7120. return -EINVAL;
  7121. }
  7122. i8xx_compute_dpll(crtc, crtc_state, NULL);
  7123. return 0;
  7124. }
  7125. static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
  7126. struct intel_crtc_state *crtc_state)
  7127. {
  7128. struct drm_device *dev = crtc->base.dev;
  7129. struct drm_i915_private *dev_priv = to_i915(dev);
  7130. const struct intel_limit *limit;
  7131. int refclk = 96000;
  7132. memset(&crtc_state->dpll_hw_state, 0,
  7133. sizeof(crtc_state->dpll_hw_state));
  7134. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7135. if (intel_panel_use_ssc(dev_priv)) {
  7136. refclk = dev_priv->vbt.lvds_ssc_freq;
  7137. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7138. }
  7139. if (intel_is_dual_link_lvds(dev))
  7140. limit = &intel_limits_g4x_dual_channel_lvds;
  7141. else
  7142. limit = &intel_limits_g4x_single_channel_lvds;
  7143. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
  7144. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
  7145. limit = &intel_limits_g4x_hdmi;
  7146. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
  7147. limit = &intel_limits_g4x_sdvo;
  7148. } else {
7149. /* Use the i9xx SDVO limits for any other output types */
  7150. limit = &intel_limits_i9xx_sdvo;
  7151. }
  7152. if (!crtc_state->clock_set &&
  7153. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7154. refclk, NULL, &crtc_state->dpll)) {
  7155. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7156. return -EINVAL;
  7157. }
  7158. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7159. return 0;
  7160. }
  7161. static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
  7162. struct intel_crtc_state *crtc_state)
  7163. {
  7164. struct drm_device *dev = crtc->base.dev;
  7165. struct drm_i915_private *dev_priv = to_i915(dev);
  7166. const struct intel_limit *limit;
  7167. int refclk = 96000;
  7168. memset(&crtc_state->dpll_hw_state, 0,
  7169. sizeof(crtc_state->dpll_hw_state));
  7170. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7171. if (intel_panel_use_ssc(dev_priv)) {
  7172. refclk = dev_priv->vbt.lvds_ssc_freq;
  7173. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7174. }
  7175. limit = &intel_limits_pineview_lvds;
  7176. } else {
  7177. limit = &intel_limits_pineview_sdvo;
  7178. }
  7179. if (!crtc_state->clock_set &&
  7180. !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7181. refclk, NULL, &crtc_state->dpll)) {
  7182. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7183. return -EINVAL;
  7184. }
  7185. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7186. return 0;
  7187. }
  7188. static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
  7189. struct intel_crtc_state *crtc_state)
  7190. {
  7191. struct drm_device *dev = crtc->base.dev;
  7192. struct drm_i915_private *dev_priv = to_i915(dev);
  7193. const struct intel_limit *limit;
  7194. int refclk = 96000;
  7195. memset(&crtc_state->dpll_hw_state, 0,
  7196. sizeof(crtc_state->dpll_hw_state));
  7197. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7198. if (intel_panel_use_ssc(dev_priv)) {
  7199. refclk = dev_priv->vbt.lvds_ssc_freq;
  7200. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7201. }
  7202. limit = &intel_limits_i9xx_lvds;
  7203. } else {
  7204. limit = &intel_limits_i9xx_sdvo;
  7205. }
  7206. if (!crtc_state->clock_set &&
  7207. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7208. refclk, NULL, &crtc_state->dpll)) {
  7209. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7210. return -EINVAL;
  7211. }
  7212. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7213. return 0;
  7214. }
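/*
 * Hedged summary of the pattern shared by the i8xx/g4x/pnv/i9xx
 * compute_clock helpers above: pick a reference clock (optionally the
 * VBT SSC frequency for LVDS), pick the divider limits for the output
 * type, then let the find_best_dpll() search choose n/m1/m2/p1/p2 so
 * that the resulting dot clock (roughly refclk * M / N / P, with the
 * exact M/N/P encodings handled by the calc_dpll_params() helpers
 * earlier in this file) hits crtc_state->port_clock.
 */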
  7215. static int chv_crtc_compute_clock(struct intel_crtc *crtc,
  7216. struct intel_crtc_state *crtc_state)
  7217. {
  7218. int refclk = 100000;
  7219. const struct intel_limit *limit = &intel_limits_chv;
  7220. memset(&crtc_state->dpll_hw_state, 0,
  7221. sizeof(crtc_state->dpll_hw_state));
  7222. if (!crtc_state->clock_set &&
  7223. !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7224. refclk, NULL, &crtc_state->dpll)) {
  7225. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7226. return -EINVAL;
  7227. }
  7228. chv_compute_dpll(crtc, crtc_state);
  7229. return 0;
  7230. }
  7231. static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
  7232. struct intel_crtc_state *crtc_state)
  7233. {
  7234. int refclk = 100000;
  7235. const struct intel_limit *limit = &intel_limits_vlv;
  7236. memset(&crtc_state->dpll_hw_state, 0,
  7237. sizeof(crtc_state->dpll_hw_state));
  7238. if (!crtc_state->clock_set &&
  7239. !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7240. refclk, NULL, &crtc_state->dpll)) {
  7241. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7242. return -EINVAL;
  7243. }
  7244. vlv_compute_dpll(crtc, crtc_state);
  7245. return 0;
  7246. }
  7247. static void i9xx_get_pfit_config(struct intel_crtc *crtc,
  7248. struct intel_crtc_state *pipe_config)
  7249. {
  7250. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  7251. uint32_t tmp;
  7252. if (INTEL_GEN(dev_priv) <= 3 &&
  7253. (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
  7254. return;
  7255. tmp = I915_READ(PFIT_CONTROL);
  7256. if (!(tmp & PFIT_ENABLE))
  7257. return;
  7258. /* Check whether the pfit is attached to our pipe. */
  7259. if (INTEL_GEN(dev_priv) < 4) {
  7260. if (crtc->pipe != PIPE_B)
  7261. return;
  7262. } else {
  7263. if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
  7264. return;
  7265. }
  7266. pipe_config->gmch_pfit.control = tmp;
  7267. pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
  7268. }
  7269. static void vlv_crtc_clock_get(struct intel_crtc *crtc,
  7270. struct intel_crtc_state *pipe_config)
  7271. {
  7272. struct drm_device *dev = crtc->base.dev;
  7273. struct drm_i915_private *dev_priv = to_i915(dev);
  7274. int pipe = pipe_config->cpu_transcoder;
  7275. struct dpll clock;
  7276. u32 mdiv;
  7277. int refclk = 100000;
  7278. /* In case of DSI, DPLL will not be used */
  7279. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  7280. return;
  7281. mutex_lock(&dev_priv->sb_lock);
  7282. mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
  7283. mutex_unlock(&dev_priv->sb_lock);
  7284. clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
  7285. clock.m2 = mdiv & DPIO_M2DIV_MASK;
  7286. clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
  7287. clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
  7288. clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
  7289. pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
  7290. }
  7291. static void
  7292. i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  7293. struct intel_initial_plane_config *plane_config)
  7294. {
  7295. struct drm_device *dev = crtc->base.dev;
  7296. struct drm_i915_private *dev_priv = to_i915(dev);
  7297. u32 val, base, offset;
  7298. int pipe = crtc->pipe, plane = crtc->plane;
  7299. int fourcc, pixel_format;
  7300. unsigned int aligned_height;
  7301. struct drm_framebuffer *fb;
  7302. struct intel_framebuffer *intel_fb;
  7303. val = I915_READ(DSPCNTR(plane));
  7304. if (!(val & DISPLAY_PLANE_ENABLE))
  7305. return;
  7306. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  7307. if (!intel_fb) {
  7308. DRM_DEBUG_KMS("failed to alloc fb\n");
  7309. return;
  7310. }
  7311. fb = &intel_fb->base;
  7312. if (INTEL_GEN(dev_priv) >= 4) {
  7313. if (val & DISPPLANE_TILED) {
  7314. plane_config->tiling = I915_TILING_X;
  7315. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  7316. }
  7317. }
  7318. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  7319. fourcc = i9xx_format_to_fourcc(pixel_format);
  7320. fb->pixel_format = fourcc;
  7321. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  7322. if (INTEL_GEN(dev_priv) >= 4) {
  7323. if (plane_config->tiling)
  7324. offset = I915_READ(DSPTILEOFF(plane));
  7325. else
  7326. offset = I915_READ(DSPLINOFF(plane));
  7327. base = I915_READ(DSPSURF(plane)) & 0xfffff000;
  7328. } else {
  7329. base = I915_READ(DSPADDR(plane));
  7330. }
  7331. plane_config->base = base;
  7332. val = I915_READ(PIPESRC(pipe));
  7333. fb->width = ((val >> 16) & 0xfff) + 1;
  7334. fb->height = ((val >> 0) & 0xfff) + 1;
  7335. val = I915_READ(DSPSTRIDE(pipe));
  7336. fb->pitches[0] = val & 0xffffffc0;
  7337. aligned_height = intel_fb_align_height(dev, fb->height,
  7338. fb->pixel_format,
  7339. fb->modifier[0]);
  7340. plane_config->size = fb->pitches[0] * aligned_height;
  7341. DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  7342. pipe_name(pipe), plane, fb->width, fb->height,
  7343. fb->bits_per_pixel, base, fb->pitches[0],
  7344. plane_config->size);
  7345. plane_config->fb = intel_fb;
  7346. }
  7347. static void chv_crtc_clock_get(struct intel_crtc *crtc,
  7348. struct intel_crtc_state *pipe_config)
  7349. {
  7350. struct drm_device *dev = crtc->base.dev;
  7351. struct drm_i915_private *dev_priv = to_i915(dev);
  7352. int pipe = pipe_config->cpu_transcoder;
  7353. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  7354. struct dpll clock;
  7355. u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
  7356. int refclk = 100000;
  7357. /* In case of DSI, DPLL will not be used */
  7358. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  7359. return;
  7360. mutex_lock(&dev_priv->sb_lock);
  7361. cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
  7362. pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
  7363. pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
  7364. pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
  7365. pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  7366. mutex_unlock(&dev_priv->sb_lock);
  7367. clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
  7368. clock.m2 = (pll_dw0 & 0xff) << 22;
  7369. if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
  7370. clock.m2 |= pll_dw2 & 0x3fffff;
  7371. clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
  7372. clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
  7373. clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
  7374. pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
  7375. }
  7376. static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  7377. struct intel_crtc_state *pipe_config)
  7378. {
  7379. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  7380. enum intel_display_power_domain power_domain;
  7381. uint32_t tmp;
  7382. bool ret;
  7383. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  7384. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  7385. return false;
  7386. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  7387. pipe_config->shared_dpll = NULL;
  7388. ret = false;
  7389. tmp = I915_READ(PIPECONF(crtc->pipe));
  7390. if (!(tmp & PIPECONF_ENABLE))
  7391. goto out;
  7392. if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  7393. IS_CHERRYVIEW(dev_priv)) {
  7394. switch (tmp & PIPECONF_BPC_MASK) {
  7395. case PIPECONF_6BPC:
  7396. pipe_config->pipe_bpp = 18;
  7397. break;
  7398. case PIPECONF_8BPC:
  7399. pipe_config->pipe_bpp = 24;
  7400. break;
  7401. case PIPECONF_10BPC:
  7402. pipe_config->pipe_bpp = 30;
  7403. break;
  7404. default:
  7405. break;
  7406. }
  7407. }
  7408. if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
  7409. (tmp & PIPECONF_COLOR_RANGE_SELECT))
  7410. pipe_config->limited_color_range = true;
  7411. if (INTEL_GEN(dev_priv) < 4)
  7412. pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
  7413. intel_get_pipe_timings(crtc, pipe_config);
  7414. intel_get_pipe_src_size(crtc, pipe_config);
  7415. i9xx_get_pfit_config(crtc, pipe_config);
  7416. if (INTEL_GEN(dev_priv) >= 4) {
  7417. /* No way to read it out on pipes B and C */
  7418. if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
  7419. tmp = dev_priv->chv_dpll_md[crtc->pipe];
  7420. else
  7421. tmp = I915_READ(DPLL_MD(crtc->pipe));
  7422. pipe_config->pixel_multiplier =
  7423. ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
  7424. >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
  7425. pipe_config->dpll_hw_state.dpll_md = tmp;
  7426. } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
  7427. IS_G33(dev_priv)) {
  7428. tmp = I915_READ(DPLL(crtc->pipe));
  7429. pipe_config->pixel_multiplier =
  7430. ((tmp & SDVO_MULTIPLIER_MASK)
  7431. >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
  7432. } else {
  7433. /* Note that on i915G/GM the pixel multiplier is in the sdvo
  7434. * port and will be fixed up in the encoder->get_config
  7435. * function. */
  7436. pipe_config->pixel_multiplier = 1;
  7437. }
  7438. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
  7439. if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
  7440. /*
  7441. * DPLL_DVO_2X_MODE must be enabled for both DPLLs
  7442. * on 830. Filter it out here so that we don't
  7443. * report errors due to that.
  7444. */
  7445. if (IS_I830(dev_priv))
  7446. pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
  7447. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
  7448. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
  7449. } else {
  7450. /* Mask out read-only status bits. */
  7451. pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
  7452. DPLL_PORTC_READY_MASK |
  7453. DPLL_PORTB_READY_MASK);
  7454. }
  7455. if (IS_CHERRYVIEW(dev_priv))
  7456. chv_crtc_clock_get(crtc, pipe_config);
  7457. else if (IS_VALLEYVIEW(dev_priv))
  7458. vlv_crtc_clock_get(crtc, pipe_config);
  7459. else
  7460. i9xx_crtc_clock_get(crtc, pipe_config);
  7461. /*
  7462. * Normally the dotclock is filled in by the encoder .get_config()
  7463. * but in case the pipe is enabled w/o any ports we need a sane
  7464. * default.
  7465. */
  7466. pipe_config->base.adjusted_mode.crtc_clock =
  7467. pipe_config->port_clock / pipe_config->pixel_multiplier;
  7468. ret = true;
  7469. out:
  7470. intel_display_power_put(dev_priv, power_domain);
  7471. return ret;
  7472. }
  7473. static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
  7474. {
  7475. struct intel_encoder *encoder;
  7476. int i;
  7477. u32 val, final;
  7478. bool has_lvds = false;
  7479. bool has_cpu_edp = false;
  7480. bool has_panel = false;
  7481. bool has_ck505 = false;
  7482. bool can_ssc = false;
  7483. bool using_ssc_source = false;
  7484. /* We need to take the global config into account */
  7485. for_each_intel_encoder(&dev_priv->drm, encoder) {
  7486. switch (encoder->type) {
  7487. case INTEL_OUTPUT_LVDS:
  7488. has_panel = true;
  7489. has_lvds = true;
  7490. break;
  7491. case INTEL_OUTPUT_EDP:
  7492. has_panel = true;
  7493. if (enc_to_dig_port(&encoder->base)->port == PORT_A)
  7494. has_cpu_edp = true;
  7495. break;
  7496. default:
  7497. break;
  7498. }
  7499. }
  7500. if (HAS_PCH_IBX(dev_priv)) {
  7501. has_ck505 = dev_priv->vbt.display_clock_mode;
  7502. can_ssc = has_ck505;
  7503. } else {
  7504. has_ck505 = false;
  7505. can_ssc = true;
  7506. }
  7507. /* Check if any DPLLs are using the SSC source */
  7508. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  7509. u32 temp = I915_READ(PCH_DPLL(i));
  7510. if (!(temp & DPLL_VCO_ENABLE))
  7511. continue;
  7512. if ((temp & PLL_REF_INPUT_MASK) ==
  7513. PLLB_REF_INPUT_SPREADSPECTRUMIN) {
  7514. using_ssc_source = true;
  7515. break;
  7516. }
  7517. }
  7518. DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
  7519. has_panel, has_lvds, has_ck505, using_ssc_source);
7520. /* Ironlake: try to set up the display reference clock before
7521. * enabling the DPLLs. This is only under the driver's
7522. * control after PCH B stepping; earlier chipset steppings
7523. * ignore this setting.
7524. */
  7525. val = I915_READ(PCH_DREF_CONTROL);
  7526. /* As we must carefully and slowly disable/enable each source in turn,
  7527. * compute the final state we want first and check if we need to
  7528. * make any changes at all.
  7529. */
  7530. final = val;
  7531. final &= ~DREF_NONSPREAD_SOURCE_MASK;
  7532. if (has_ck505)
  7533. final |= DREF_NONSPREAD_CK505_ENABLE;
  7534. else
  7535. final |= DREF_NONSPREAD_SOURCE_ENABLE;
  7536. final &= ~DREF_SSC_SOURCE_MASK;
  7537. final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7538. final &= ~DREF_SSC1_ENABLE;
  7539. if (has_panel) {
  7540. final |= DREF_SSC_SOURCE_ENABLE;
  7541. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7542. final |= DREF_SSC1_ENABLE;
  7543. if (has_cpu_edp) {
  7544. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7545. final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7546. else
  7547. final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7548. } else
  7549. final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7550. } else if (using_ssc_source) {
  7551. final |= DREF_SSC_SOURCE_ENABLE;
  7552. final |= DREF_SSC1_ENABLE;
  7553. }
  7554. if (final == val)
  7555. return;
  7556. /* Always enable nonspread source */
  7557. val &= ~DREF_NONSPREAD_SOURCE_MASK;
  7558. if (has_ck505)
  7559. val |= DREF_NONSPREAD_CK505_ENABLE;
  7560. else
  7561. val |= DREF_NONSPREAD_SOURCE_ENABLE;
  7562. if (has_panel) {
  7563. val &= ~DREF_SSC_SOURCE_MASK;
  7564. val |= DREF_SSC_SOURCE_ENABLE;
  7565. /* SSC must be turned on before enabling the CPU output */
  7566. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7567. DRM_DEBUG_KMS("Using SSC on panel\n");
  7568. val |= DREF_SSC1_ENABLE;
  7569. } else
  7570. val &= ~DREF_SSC1_ENABLE;
  7571. /* Get SSC going before enabling the outputs */
  7572. I915_WRITE(PCH_DREF_CONTROL, val);
  7573. POSTING_READ(PCH_DREF_CONTROL);
  7574. udelay(200);
  7575. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7576. /* Enable CPU source on CPU attached eDP */
  7577. if (has_cpu_edp) {
  7578. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7579. DRM_DEBUG_KMS("Using SSC on eDP\n");
  7580. val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7581. } else
  7582. val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7583. } else
  7584. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7585. I915_WRITE(PCH_DREF_CONTROL, val);
  7586. POSTING_READ(PCH_DREF_CONTROL);
  7587. udelay(200);
  7588. } else {
  7589. DRM_DEBUG_KMS("Disabling CPU source output\n");
  7590. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7591. /* Turn off CPU output */
  7592. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7593. I915_WRITE(PCH_DREF_CONTROL, val);
  7594. POSTING_READ(PCH_DREF_CONTROL);
  7595. udelay(200);
  7596. if (!using_ssc_source) {
  7597. DRM_DEBUG_KMS("Disabling SSC source\n");
  7598. /* Turn off the SSC source */
  7599. val &= ~DREF_SSC_SOURCE_MASK;
  7600. val |= DREF_SSC_SOURCE_DISABLE;
  7601. /* Turn off SSC1 */
  7602. val &= ~DREF_SSC1_ENABLE;
  7603. I915_WRITE(PCH_DREF_CONTROL, val);
  7604. POSTING_READ(PCH_DREF_CONTROL);
  7605. udelay(200);
  7606. }
  7607. }
  7608. BUG_ON(val != final);
  7609. }
  7610. static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
  7611. {
  7612. uint32_t tmp;
  7613. tmp = I915_READ(SOUTH_CHICKEN2);
  7614. tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
  7615. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7616. if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
  7617. FDI_MPHY_IOSFSB_RESET_STATUS, 100))
  7618. DRM_ERROR("FDI mPHY reset assert timeout\n");
  7619. tmp = I915_READ(SOUTH_CHICKEN2);
  7620. tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
  7621. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7622. if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
  7623. FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
  7624. DRM_ERROR("FDI mPHY reset de-assert timeout\n");
  7625. }
  7626. /* WaMPhyProgramming:hsw */
  7627. static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
  7628. {
  7629. uint32_t tmp;
  7630. tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
  7631. tmp &= ~(0xFF << 24);
  7632. tmp |= (0x12 << 24);
  7633. intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
  7634. tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
  7635. tmp |= (1 << 11);
  7636. intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
  7637. tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
  7638. tmp |= (1 << 11);
  7639. intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
  7640. tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
  7641. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7642. intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
  7643. tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
  7644. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7645. intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
  7646. tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
  7647. tmp &= ~(7 << 13);
  7648. tmp |= (5 << 13);
  7649. intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
  7650. tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
  7651. tmp &= ~(7 << 13);
  7652. tmp |= (5 << 13);
  7653. intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
  7654. tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
  7655. tmp &= ~0xFF;
  7656. tmp |= 0x1C;
  7657. intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
  7658. tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
  7659. tmp &= ~0xFF;
  7660. tmp |= 0x1C;
  7661. intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
  7662. tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
  7663. tmp &= ~(0xFF << 16);
  7664. tmp |= (0x1C << 16);
  7665. intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
  7666. tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
  7667. tmp &= ~(0xFF << 16);
  7668. tmp |= (0x1C << 16);
  7669. intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
  7670. tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
  7671. tmp |= (1 << 27);
  7672. intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
  7673. tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
  7674. tmp |= (1 << 27);
  7675. intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
  7676. tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
  7677. tmp &= ~(0xF << 28);
  7678. tmp |= (4 << 28);
  7679. intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
  7680. tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
  7681. tmp &= ~(0xF << 28);
  7682. tmp |= (4 << 28);
  7683. intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
  7684. }
  7685. /* Implements 3 different sequences from BSpec chapter "Display iCLK
  7686. * Programming" based on the parameters passed:
  7687. * - Sequence to enable CLKOUT_DP
  7688. * - Sequence to enable CLKOUT_DP without spread
  7689. * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
  7690. */
  7691. static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
  7692. bool with_spread, bool with_fdi)
  7693. {
  7694. uint32_t reg, tmp;
  7695. if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
  7696. with_spread = true;
  7697. if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
  7698. with_fdi, "LP PCH doesn't have FDI\n"))
  7699. with_fdi = false;
  7700. mutex_lock(&dev_priv->sb_lock);
  7701. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7702. tmp &= ~SBI_SSCCTL_DISABLE;
  7703. tmp |= SBI_SSCCTL_PATHALT;
  7704. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7705. udelay(24);
  7706. if (with_spread) {
  7707. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7708. tmp &= ~SBI_SSCCTL_PATHALT;
  7709. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7710. if (with_fdi) {
  7711. lpt_reset_fdi_mphy(dev_priv);
  7712. lpt_program_fdi_mphy(dev_priv);
  7713. }
  7714. }
  7715. reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
  7716. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7717. tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7718. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7719. mutex_unlock(&dev_priv->sb_lock);
  7720. }
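/*
 * Illustrative mapping of the three Bspec sequences listed above onto
 * the parameters (inferred from the WARNs at the top of the function,
 * not quoted from Bspec):
 *
 *	lpt_enable_clkout_dp(dev_priv, true,  true)	CLKOUT_DP for FDI
 *	lpt_enable_clkout_dp(dev_priv, true,  false)	CLKOUT_DP with spread
 *	lpt_enable_clkout_dp(dev_priv, false, false)	CLKOUT_DP without spread
 *
 * with_fdi without with_spread is rejected, as is with_fdi on LP PCH.
 */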
  7721. /* Sequence to disable CLKOUT_DP */
  7722. static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
  7723. {
  7724. uint32_t reg, tmp;
  7725. mutex_lock(&dev_priv->sb_lock);
  7726. reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
  7727. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7728. tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7729. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7730. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7731. if (!(tmp & SBI_SSCCTL_DISABLE)) {
  7732. if (!(tmp & SBI_SSCCTL_PATHALT)) {
  7733. tmp |= SBI_SSCCTL_PATHALT;
  7734. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7735. udelay(32);
  7736. }
  7737. tmp |= SBI_SSCCTL_DISABLE;
  7738. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7739. }
  7740. mutex_unlock(&dev_priv->sb_lock);
  7741. }
  7742. #define BEND_IDX(steps) ((50 + (steps)) / 5)
  7743. static const uint16_t sscdivintphase[] = {
  7744. [BEND_IDX( 50)] = 0x3B23,
  7745. [BEND_IDX( 45)] = 0x3B23,
  7746. [BEND_IDX( 40)] = 0x3C23,
  7747. [BEND_IDX( 35)] = 0x3C23,
  7748. [BEND_IDX( 30)] = 0x3D23,
  7749. [BEND_IDX( 25)] = 0x3D23,
  7750. [BEND_IDX( 20)] = 0x3E23,
  7751. [BEND_IDX( 15)] = 0x3E23,
  7752. [BEND_IDX( 10)] = 0x3F23,
  7753. [BEND_IDX( 5)] = 0x3F23,
  7754. [BEND_IDX( 0)] = 0x0025,
  7755. [BEND_IDX( -5)] = 0x0025,
  7756. [BEND_IDX(-10)] = 0x0125,
  7757. [BEND_IDX(-15)] = 0x0125,
  7758. [BEND_IDX(-20)] = 0x0225,
  7759. [BEND_IDX(-25)] = 0x0225,
  7760. [BEND_IDX(-30)] = 0x0325,
  7761. [BEND_IDX(-35)] = 0x0325,
  7762. [BEND_IDX(-40)] = 0x0425,
  7763. [BEND_IDX(-45)] = 0x0425,
  7764. [BEND_IDX(-50)] = 0x0525,
  7765. };
  7766. /*
  7767. * Bend CLKOUT_DP
  7768. * steps -50 to 50 inclusive, in steps of 5
  7769. * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
  7770. * change in clock period = -(steps / 10) * 5.787 ps
  7771. */
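/*
 * Worked example (from the formula above): steps = -20 gives
 * idx = BEND_IDX(-20) = (50 - 20) / 5 = 6, i.e. sscdivintphase entry
 * 0x0225, and a period change of -(-20 / 10) * 5.787 ps = +11.574 ps,
 * so the 135 MHz clock (period ~7407 ps) is slowed by roughly 0.16%.
 */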
  7772. static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
  7773. {
  7774. uint32_t tmp;
  7775. int idx = BEND_IDX(steps);
  7776. if (WARN_ON(steps % 5 != 0))
  7777. return;
  7778. if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
  7779. return;
  7780. mutex_lock(&dev_priv->sb_lock);
  7781. if (steps % 10 != 0)
  7782. tmp = 0xAAAAAAAB;
  7783. else
  7784. tmp = 0x00000000;
  7785. intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
  7786. tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
  7787. tmp &= 0xffff0000;
  7788. tmp |= sscdivintphase[idx];
  7789. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
  7790. mutex_unlock(&dev_priv->sb_lock);
  7791. }
  7792. #undef BEND_IDX
  7793. static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
  7794. {
  7795. struct intel_encoder *encoder;
  7796. bool has_vga = false;
  7797. for_each_intel_encoder(&dev_priv->drm, encoder) {
  7798. switch (encoder->type) {
  7799. case INTEL_OUTPUT_ANALOG:
  7800. has_vga = true;
  7801. break;
  7802. default:
  7803. break;
  7804. }
  7805. }
  7806. if (has_vga) {
  7807. lpt_bend_clkout_dp(dev_priv, 0);
  7808. lpt_enable_clkout_dp(dev_priv, true, true);
  7809. } else {
  7810. lpt_disable_clkout_dp(dev_priv);
  7811. }
  7812. }
  7813. /*
  7814. * Initialize reference clocks when the driver loads
  7815. */
  7816. void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
  7817. {
  7818. if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
  7819. ironlake_init_pch_refclk(dev_priv);
  7820. else if (HAS_PCH_LPT(dev_priv))
  7821. lpt_init_pch_refclk(dev_priv);
  7822. }
  7823. static void ironlake_set_pipeconf(struct drm_crtc *crtc)
  7824. {
  7825. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7826. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7827. int pipe = intel_crtc->pipe;
  7828. uint32_t val;
  7829. val = 0;
  7830. switch (intel_crtc->config->pipe_bpp) {
  7831. case 18:
  7832. val |= PIPECONF_6BPC;
  7833. break;
  7834. case 24:
  7835. val |= PIPECONF_8BPC;
  7836. break;
  7837. case 30:
  7838. val |= PIPECONF_10BPC;
  7839. break;
  7840. case 36:
  7841. val |= PIPECONF_12BPC;
  7842. break;
  7843. default:
  7844. /* Case prevented by intel_choose_pipe_bpp_dither. */
  7845. BUG();
  7846. }
  7847. if (intel_crtc->config->dither)
  7848. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7849. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7850. val |= PIPECONF_INTERLACED_ILK;
  7851. else
  7852. val |= PIPECONF_PROGRESSIVE;
  7853. if (intel_crtc->config->limited_color_range)
  7854. val |= PIPECONF_COLOR_RANGE_SELECT;
  7855. I915_WRITE(PIPECONF(pipe), val);
  7856. POSTING_READ(PIPECONF(pipe));
  7857. }
  7858. static void haswell_set_pipeconf(struct drm_crtc *crtc)
  7859. {
  7860. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7861. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7862. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  7863. u32 val = 0;
  7864. if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
  7865. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7866. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7867. val |= PIPECONF_INTERLACED_ILK;
  7868. else
  7869. val |= PIPECONF_PROGRESSIVE;
  7870. I915_WRITE(PIPECONF(cpu_transcoder), val);
  7871. POSTING_READ(PIPECONF(cpu_transcoder));
  7872. }
  7873. static void haswell_set_pipemisc(struct drm_crtc *crtc)
  7874. {
  7875. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7876. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7877. if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
  7878. u32 val = 0;
  7879. switch (intel_crtc->config->pipe_bpp) {
  7880. case 18:
  7881. val |= PIPEMISC_DITHER_6_BPC;
  7882. break;
  7883. case 24:
  7884. val |= PIPEMISC_DITHER_8_BPC;
  7885. break;
  7886. case 30:
  7887. val |= PIPEMISC_DITHER_10_BPC;
  7888. break;
  7889. case 36:
  7890. val |= PIPEMISC_DITHER_12_BPC;
  7891. break;
  7892. default:
  7893. /* Case prevented by pipe_config_set_bpp. */
  7894. BUG();
  7895. }
  7896. if (intel_crtc->config->dither)
  7897. val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
  7898. I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
  7899. }
  7900. }
  7901. int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
  7902. {
  7903. /*
  7904. * Account for spread spectrum to avoid
  7905. * oversubscribing the link. Max center spread
  7906. * is 2.5%; use 5% for safety's sake.
  7907. */
  7908. u32 bps = target_clock * bpp * 21 / 20;
  7909. return DIV_ROUND_UP(bps, link_bw * 8);
  7910. }
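/*
 * Worked example (assuming, as elsewhere in this file, that both clocks
 * are in kHz): a 148500 kHz mode at 24 bpp over a 270000 kHz FDI link
 * needs bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes; the 21/20 factor is the
 * 5% spread-spectrum margin mentioned above.
 */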
  7911. static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
  7912. {
  7913. return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
  7914. }
  7915. static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
  7916. struct intel_crtc_state *crtc_state,
  7917. struct dpll *reduced_clock)
  7918. {
  7919. struct drm_crtc *crtc = &intel_crtc->base;
  7920. struct drm_device *dev = crtc->dev;
  7921. struct drm_i915_private *dev_priv = to_i915(dev);
  7922. u32 dpll, fp, fp2;
  7923. int factor;
  7924. /* Enable autotuning of the PLL clock (if permissible) */
  7925. factor = 21;
  7926. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7927. if ((intel_panel_use_ssc(dev_priv) &&
  7928. dev_priv->vbt.lvds_ssc_freq == 100000) ||
  7929. (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
  7930. factor = 25;
  7931. } else if (crtc_state->sdvo_tv_clock)
  7932. factor = 20;
  7933. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  7934. if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
  7935. fp |= FP_CB_TUNE;
  7936. if (reduced_clock) {
  7937. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  7938. if (reduced_clock->m < factor * reduced_clock->n)
  7939. fp2 |= FP_CB_TUNE;
  7940. } else {
  7941. fp2 = fp;
  7942. }
  7943. dpll = 0;
  7944. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
  7945. dpll |= DPLLB_MODE_LVDS;
  7946. else
  7947. dpll |= DPLLB_MODE_DAC_SERIAL;
  7948. dpll |= (crtc_state->pixel_multiplier - 1)
  7949. << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
  7950. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
  7951. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
  7952. dpll |= DPLL_SDVO_HIGH_SPEED;
  7953. if (intel_crtc_has_dp_encoder(crtc_state))
  7954. dpll |= DPLL_SDVO_HIGH_SPEED;
  7955. /*
  7956. * The high speed IO clock is only really required for
  7957. * SDVO/HDMI/DP, but we also enable it for CRT to make it
  7958. * possible to share the DPLL between CRT and HDMI. Enabling
  7959. * the clock needlessly does no real harm, except use up a
  7960. * bit of power potentially.
  7961. *
  7962. * We'll limit this to IVB with 3 pipes, since it has only two
  7963. * DPLLs and so DPLL sharing is the only way to get three pipes
  7964. * driving PCH ports at the same time. On SNB we could do this,
  7965. * and potentially avoid enabling the second DPLL, but it's not
7966. * clear if it's a win or a loss power-wise. No point in doing
  7967. * this on ILK at all since it has a fixed DPLL<->pipe mapping.
  7968. */
  7969. if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
  7970. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
  7971. dpll |= DPLL_SDVO_HIGH_SPEED;
  7972. /* compute bitmask from p1 value */
  7973. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  7974. /* also FPA1 */
  7975. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  7976. switch (crtc_state->dpll.p2) {
  7977. case 5:
  7978. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  7979. break;
  7980. case 7:
  7981. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  7982. break;
  7983. case 10:
  7984. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  7985. break;
  7986. case 14:
  7987. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  7988. break;
  7989. }
  7990. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  7991. intel_panel_use_ssc(dev_priv))
  7992. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  7993. else
  7994. dpll |= PLL_REF_INPUT_DREFCLK;
  7995. dpll |= DPLL_VCO_ENABLE;
  7996. crtc_state->dpll_hw_state.dpll = dpll;
  7997. crtc_state->dpll_hw_state.fp0 = fp;
  7998. crtc_state->dpll_hw_state.fp1 = fp2;
  7999. }
  8000. static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
  8001. struct intel_crtc_state *crtc_state)
  8002. {
  8003. struct drm_device *dev = crtc->base.dev;
  8004. struct drm_i915_private *dev_priv = to_i915(dev);
  8005. struct dpll reduced_clock;
  8006. bool has_reduced_clock = false;
  8007. struct intel_shared_dpll *pll;
  8008. const struct intel_limit *limit;
  8009. int refclk = 120000;
  8010. memset(&crtc_state->dpll_hw_state, 0,
  8011. sizeof(crtc_state->dpll_hw_state));
  8012. crtc->lowfreq_avail = false;
  8013. /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
  8014. if (!crtc_state->has_pch_encoder)
  8015. return 0;
  8016. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  8017. if (intel_panel_use_ssc(dev_priv)) {
  8018. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
  8019. dev_priv->vbt.lvds_ssc_freq);
  8020. refclk = dev_priv->vbt.lvds_ssc_freq;
  8021. }
  8022. if (intel_is_dual_link_lvds(dev)) {
  8023. if (refclk == 100000)
  8024. limit = &intel_limits_ironlake_dual_lvds_100m;
  8025. else
  8026. limit = &intel_limits_ironlake_dual_lvds;
  8027. } else {
  8028. if (refclk == 100000)
  8029. limit = &intel_limits_ironlake_single_lvds_100m;
  8030. else
  8031. limit = &intel_limits_ironlake_single_lvds;
  8032. }
  8033. } else {
  8034. limit = &intel_limits_ironlake_dac;
  8035. }
  8036. if (!crtc_state->clock_set &&
  8037. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  8038. refclk, NULL, &crtc_state->dpll)) {
  8039. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  8040. return -EINVAL;
  8041. }
  8042. ironlake_compute_dpll(crtc, crtc_state,
  8043. has_reduced_clock ? &reduced_clock : NULL);
  8044. pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
  8045. if (pll == NULL) {
  8046. DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
  8047. pipe_name(crtc->pipe));
  8048. return -EINVAL;
  8049. }
  8050. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  8051. has_reduced_clock)
  8052. crtc->lowfreq_avail = true;
  8053. return 0;
  8054. }
  8055. static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
  8056. struct intel_link_m_n *m_n)
  8057. {
  8058. struct drm_device *dev = crtc->base.dev;
  8059. struct drm_i915_private *dev_priv = to_i915(dev);
  8060. enum pipe pipe = crtc->pipe;
  8061. m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
  8062. m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
  8063. m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
  8064. & ~TU_SIZE_MASK;
  8065. m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
  8066. m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
  8067. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8068. }
  8069. static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
  8070. enum transcoder transcoder,
  8071. struct intel_link_m_n *m_n,
  8072. struct intel_link_m_n *m2_n2)
  8073. {
  8074. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8075. enum pipe pipe = crtc->pipe;
  8076. if (INTEL_GEN(dev_priv) >= 5) {
  8077. m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
  8078. m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
  8079. m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
  8080. & ~TU_SIZE_MASK;
  8081. m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
  8082. m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
  8083. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8084. /* Read the M2_N2 registers only on gen < 8, where they exist,
8085. * and only if DRRS is supported, so that the registers are
8086. * not read unnecessarily.
8087. */
  8088. if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
  8089. crtc->config->has_drrs) {
  8090. m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
  8091. m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
  8092. m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
  8093. & ~TU_SIZE_MASK;
  8094. m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
  8095. m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
  8096. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8097. }
  8098. } else {
  8099. m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
  8100. m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
  8101. m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
  8102. & ~TU_SIZE_MASK;
  8103. m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
  8104. m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
  8105. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8106. }
  8107. }
  8108. void intel_dp_get_m_n(struct intel_crtc *crtc,
  8109. struct intel_crtc_state *pipe_config)
  8110. {
  8111. if (pipe_config->has_pch_encoder)
  8112. intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
  8113. else
  8114. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  8115. &pipe_config->dp_m_n,
  8116. &pipe_config->dp_m2_n2);
  8117. }
  8118. static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
  8119. struct intel_crtc_state *pipe_config)
  8120. {
  8121. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  8122. &pipe_config->fdi_m_n, NULL);
  8123. }
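/*
* Find the scaler (if any) that is bound to the pipe output rather than
* to a plane, and report its window position/size as the pch_pfit state.
*/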
  8124. static void skylake_get_pfit_config(struct intel_crtc *crtc,
  8125. struct intel_crtc_state *pipe_config)
  8126. {
  8127. struct drm_device *dev = crtc->base.dev;
  8128. struct drm_i915_private *dev_priv = to_i915(dev);
  8129. struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
  8130. uint32_t ps_ctrl = 0;
  8131. int id = -1;
  8132. int i;
  8133. /* find scaler attached to this pipe */
  8134. for (i = 0; i < crtc->num_scalers; i++) {
  8135. ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
  8136. if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
  8137. id = i;
  8138. pipe_config->pch_pfit.enabled = true;
  8139. pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
  8140. pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
  8141. break;
  8142. }
  8143. }
  8144. scaler_state->scaler_id = id;
  8145. if (id >= 0) {
  8146. scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
  8147. } else {
  8148. scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8149. }
  8150. }
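/*
* Read the primary plane registers and reconstruct the framebuffer that
* is currently programmed (format, tiling, stride and size) into
* plane_config.
*/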
  8151. static void
  8152. skylake_get_initial_plane_config(struct intel_crtc *crtc,
  8153. struct intel_initial_plane_config *plane_config)
  8154. {
  8155. struct drm_device *dev = crtc->base.dev;
  8156. struct drm_i915_private *dev_priv = to_i915(dev);
  8157. u32 val, base, offset, stride_mult, tiling;
  8158. int pipe = crtc->pipe;
  8159. int fourcc, pixel_format;
  8160. unsigned int aligned_height;
  8161. struct drm_framebuffer *fb;
  8162. struct intel_framebuffer *intel_fb;
  8163. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  8164. if (!intel_fb) {
  8165. DRM_DEBUG_KMS("failed to alloc fb\n");
  8166. return;
  8167. }
  8168. fb = &intel_fb->base;
  8169. val = I915_READ(PLANE_CTL(pipe, 0));
  8170. if (!(val & PLANE_CTL_ENABLE))
  8171. goto error;
  8172. pixel_format = val & PLANE_CTL_FORMAT_MASK;
  8173. fourcc = skl_format_to_fourcc(pixel_format,
  8174. val & PLANE_CTL_ORDER_RGBX,
  8175. val & PLANE_CTL_ALPHA_MASK);
  8176. fb->pixel_format = fourcc;
  8177. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  8178. tiling = val & PLANE_CTL_TILED_MASK;
  8179. switch (tiling) {
  8180. case PLANE_CTL_TILED_LINEAR:
  8181. fb->modifier[0] = DRM_FORMAT_MOD_NONE;
  8182. break;
  8183. case PLANE_CTL_TILED_X:
  8184. plane_config->tiling = I915_TILING_X;
  8185. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  8186. break;
  8187. case PLANE_CTL_TILED_Y:
  8188. fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
  8189. break;
  8190. case PLANE_CTL_TILED_YF:
  8191. fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
  8192. break;
  8193. default:
  8194. MISSING_CASE(tiling);
  8195. goto error;
  8196. }
  8197. base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
  8198. plane_config->base = base;
  8199. offset = I915_READ(PLANE_OFFSET(pipe, 0));
  8200. val = I915_READ(PLANE_SIZE(pipe, 0));
  8201. fb->height = ((val >> 16) & 0xfff) + 1;
  8202. fb->width = ((val >> 0) & 0x1fff) + 1;
  8203. val = I915_READ(PLANE_STRIDE(pipe, 0));
  8204. stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  8205. fb->pixel_format);
  8206. fb->pitches[0] = (val & 0x3ff) * stride_mult;
  8207. aligned_height = intel_fb_align_height(dev, fb->height,
  8208. fb->pixel_format,
  8209. fb->modifier[0]);
  8210. plane_config->size = fb->pitches[0] * aligned_height;
  8211. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  8212. pipe_name(pipe), fb->width, fb->height,
  8213. fb->bits_per_pixel, base, fb->pitches[0],
  8214. plane_config->size);
  8215. plane_config->fb = intel_fb;
  8216. return;
  8217. error:
  8218. kfree(intel_fb);
  8219. }
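/*
* Read back the panel fitter state for this pipe; on IVB/HSW also warn
* if the enabled fitter is not the one assigned to this pipe.
*/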
  8220. static void ironlake_get_pfit_config(struct intel_crtc *crtc,
  8221. struct intel_crtc_state *pipe_config)
  8222. {
  8223. struct drm_device *dev = crtc->base.dev;
  8224. struct drm_i915_private *dev_priv = to_i915(dev);
  8225. uint32_t tmp;
  8226. tmp = I915_READ(PF_CTL(crtc->pipe));
  8227. if (tmp & PF_ENABLE) {
  8228. pipe_config->pch_pfit.enabled = true;
  8229. pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
  8230. pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
  8234. if (IS_GEN7(dev_priv)) {
  8235. WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
  8236. PF_PIPE_SEL_IVB(crtc->pipe));
  8237. }
  8238. }
  8239. }
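/*
* Pre-SKL counterpart of skylake_get_initial_plane_config(): reconstruct
* the currently programmed framebuffer (format, tiling, stride and size)
* from the display plane registers into plane_config.
*/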
  8240. static void
  8241. ironlake_get_initial_plane_config(struct intel_crtc *crtc,
  8242. struct intel_initial_plane_config *plane_config)
  8243. {
  8244. struct drm_device *dev = crtc->base.dev;
  8245. struct drm_i915_private *dev_priv = to_i915(dev);
  8246. u32 val, base, offset;
  8247. int pipe = crtc->pipe;
  8248. int fourcc, pixel_format;
  8249. unsigned int aligned_height;
  8250. struct drm_framebuffer *fb;
  8251. struct intel_framebuffer *intel_fb;
  8252. val = I915_READ(DSPCNTR(pipe));
  8253. if (!(val & DISPLAY_PLANE_ENABLE))
  8254. return;
  8255. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  8256. if (!intel_fb) {
  8257. DRM_DEBUG_KMS("failed to alloc fb\n");
  8258. return;
  8259. }
  8260. fb = &intel_fb->base;
  8261. if (INTEL_GEN(dev_priv) >= 4) {
  8262. if (val & DISPPLANE_TILED) {
  8263. plane_config->tiling = I915_TILING_X;
  8264. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  8265. }
  8266. }
  8267. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  8268. fourcc = i9xx_format_to_fourcc(pixel_format);
  8269. fb->pixel_format = fourcc;
  8270. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  8271. base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
  8272. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  8273. offset = I915_READ(DSPOFFSET(pipe));
  8274. } else {
  8275. if (plane_config->tiling)
  8276. offset = I915_READ(DSPTILEOFF(pipe));
  8277. else
  8278. offset = I915_READ(DSPLINOFF(pipe));
  8279. }
  8280. plane_config->base = base;
  8281. val = I915_READ(PIPESRC(pipe));
  8282. fb->width = ((val >> 16) & 0xfff) + 1;
  8283. fb->height = ((val >> 0) & 0xfff) + 1;
  8284. val = I915_READ(DSPSTRIDE(pipe));
  8285. fb->pitches[0] = val & 0xffffffc0;
  8286. aligned_height = intel_fb_align_height(dev, fb->height,
  8287. fb->pixel_format,
  8288. fb->modifier[0]);
  8289. plane_config->size = fb->pitches[0] * aligned_height;
  8290. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  8291. pipe_name(pipe), fb->width, fb->height,
  8292. fb->bits_per_pixel, base, fb->pitches[0],
  8293. plane_config->size);
  8294. plane_config->fb = intel_fb;
  8295. }
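/*
* Read out the hardware state of an ILK-style pipe (bpp, color range,
* FDI/PCH transcoder, shared DPLL, timings and panel fitter) into
* pipe_config. Returns false if the pipe or its power domain is off.
*/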
  8296. static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  8297. struct intel_crtc_state *pipe_config)
  8298. {
  8299. struct drm_device *dev = crtc->base.dev;
  8300. struct drm_i915_private *dev_priv = to_i915(dev);
  8301. enum intel_display_power_domain power_domain;
  8302. uint32_t tmp;
  8303. bool ret;
  8304. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  8305. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8306. return false;
  8307. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  8308. pipe_config->shared_dpll = NULL;
  8309. ret = false;
  8310. tmp = I915_READ(PIPECONF(crtc->pipe));
  8311. if (!(tmp & PIPECONF_ENABLE))
  8312. goto out;
  8313. switch (tmp & PIPECONF_BPC_MASK) {
  8314. case PIPECONF_6BPC:
  8315. pipe_config->pipe_bpp = 18;
  8316. break;
  8317. case PIPECONF_8BPC:
  8318. pipe_config->pipe_bpp = 24;
  8319. break;
  8320. case PIPECONF_10BPC:
  8321. pipe_config->pipe_bpp = 30;
  8322. break;
  8323. case PIPECONF_12BPC:
  8324. pipe_config->pipe_bpp = 36;
  8325. break;
  8326. default:
  8327. break;
  8328. }
  8329. if (tmp & PIPECONF_COLOR_RANGE_SELECT)
  8330. pipe_config->limited_color_range = true;
  8331. if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
  8332. struct intel_shared_dpll *pll;
  8333. enum intel_dpll_id pll_id;
  8334. pipe_config->has_pch_encoder = true;
  8335. tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
  8336. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  8337. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  8338. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  8339. if (HAS_PCH_IBX(dev_priv)) {
  8340. /*
  8341. * The pipe->pch transcoder and pch transcoder->pll
  8342. * mapping is fixed.
  8343. */
  8344. pll_id = (enum intel_dpll_id) crtc->pipe;
  8345. } else {
  8346. tmp = I915_READ(PCH_DPLL_SEL);
  8347. if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
  8348. pll_id = DPLL_ID_PCH_PLL_B;
  8349. else
pll_id = DPLL_ID_PCH_PLL_A;
  8351. }
  8352. pipe_config->shared_dpll =
  8353. intel_get_shared_dpll_by_id(dev_priv, pll_id);
  8354. pll = pipe_config->shared_dpll;
  8355. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  8356. &pipe_config->dpll_hw_state));
  8357. tmp = pipe_config->dpll_hw_state.dpll;
  8358. pipe_config->pixel_multiplier =
  8359. ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
  8360. >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
  8361. ironlake_pch_clock_get(crtc, pipe_config);
  8362. } else {
  8363. pipe_config->pixel_multiplier = 1;
  8364. }
  8365. intel_get_pipe_timings(crtc, pipe_config);
  8366. intel_get_pipe_src_size(crtc, pipe_config);
  8367. ironlake_get_pfit_config(crtc, pipe_config);
  8368. ret = true;
  8369. out:
  8370. intel_display_power_put(dev_priv, power_domain);
  8371. return ret;
  8372. }
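/*
* Warn if anything that depends on LCPLL is still enabled: active CRTCs,
* the power well, SPLL/WRPLLs, panel power, backlight PWMs, the utility
* pin, PCH GTC or interrupts.
*/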
  8373. static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  8374. {
  8375. struct drm_device *dev = &dev_priv->drm;
  8376. struct intel_crtc *crtc;
  8377. for_each_intel_crtc(dev, crtc)
  8378. I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
  8379. pipe_name(crtc->pipe));
  8380. I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
  8381. I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
  8382. I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
  8383. I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
  8384. I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
  8385. I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
  8386. "CPU PWM1 enabled\n");
  8387. if (IS_HASWELL(dev_priv))
  8388. I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
  8389. "CPU PWM2 enabled\n");
  8390. I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
  8391. "PCH PWM1 enabled\n");
  8392. I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
  8393. "Utility pin enabled\n");
  8394. I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
  8395. /*
  8396. * In theory we can still leave IRQs enabled, as long as only the HPD
  8397. * interrupts remain enabled. We used to check for that, but since it's
  8398. * gen-specific and since we only disable LCPLL after we fully disable
  8399. * the interrupts, the check below should be enough.
  8400. */
  8401. I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
  8402. }
  8403. static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
  8404. {
  8405. if (IS_HASWELL(dev_priv))
  8406. return I915_READ(D_COMP_HSW);
  8407. else
  8408. return I915_READ(D_COMP_BDW);
  8409. }
  8410. static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
  8411. {
  8412. if (IS_HASWELL(dev_priv)) {
  8413. mutex_lock(&dev_priv->rps.hw_lock);
  8414. if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
  8415. val))
  8416. DRM_DEBUG_KMS("Failed to write to D_COMP\n");
  8417. mutex_unlock(&dev_priv->rps.hw_lock);
  8418. } else {
  8419. I915_WRITE(D_COMP_BDW, val);
  8420. POSTING_READ(D_COMP_BDW);
  8421. }
  8422. }
  8423. /*
  8424. * This function implements pieces of two sequences from BSpec:
  8425. * - Sequence for display software to disable LCPLL
  8426. * - Sequence for display software to allow package C8+
  8427. * The steps implemented here are just the steps that actually touch the LCPLL
  8428. * register. Callers should take care of disabling all the display engine
  8429. * functions, doing the mode unset, fixing interrupts, etc.
  8430. */
  8431. static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  8432. bool switch_to_fclk, bool allow_power_down)
  8433. {
  8434. uint32_t val;
  8435. assert_can_disable_lcpll(dev_priv);
  8436. val = I915_READ(LCPLL_CTL);
  8437. if (switch_to_fclk) {
  8438. val |= LCPLL_CD_SOURCE_FCLK;
  8439. I915_WRITE(LCPLL_CTL, val);
  8440. if (wait_for_us(I915_READ(LCPLL_CTL) &
  8441. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  8442. DRM_ERROR("Switching to FCLK failed\n");
  8443. val = I915_READ(LCPLL_CTL);
  8444. }
  8445. val |= LCPLL_PLL_DISABLE;
  8446. I915_WRITE(LCPLL_CTL, val);
  8447. POSTING_READ(LCPLL_CTL);
  8448. if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
  8449. DRM_ERROR("LCPLL still locked\n");
  8450. val = hsw_read_dcomp(dev_priv);
  8451. val |= D_COMP_COMP_DISABLE;
  8452. hsw_write_dcomp(dev_priv, val);
  8453. ndelay(100);
  8454. if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
  8455. 1))
  8456. DRM_ERROR("D_COMP RCOMP still in progress\n");
  8457. if (allow_power_down) {
  8458. val = I915_READ(LCPLL_CTL);
  8459. val |= LCPLL_POWER_DOWN_ALLOW;
  8460. I915_WRITE(LCPLL_CTL, val);
  8461. POSTING_READ(LCPLL_CTL);
  8462. }
  8463. }
  8464. /*
  8465. * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  8466. * source.
  8467. */
  8468. static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  8469. {
  8470. uint32_t val;
  8471. val = I915_READ(LCPLL_CTL);
  8472. if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
  8473. LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
  8474. return;
  8475. /*
* Make sure we're not in the PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
  8478. */
  8479. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  8480. if (val & LCPLL_POWER_DOWN_ALLOW) {
  8481. val &= ~LCPLL_POWER_DOWN_ALLOW;
  8482. I915_WRITE(LCPLL_CTL, val);
  8483. POSTING_READ(LCPLL_CTL);
  8484. }
  8485. val = hsw_read_dcomp(dev_priv);
  8486. val |= D_COMP_COMP_FORCE;
  8487. val &= ~D_COMP_COMP_DISABLE;
  8488. hsw_write_dcomp(dev_priv, val);
  8489. val = I915_READ(LCPLL_CTL);
  8490. val &= ~LCPLL_PLL_DISABLE;
  8491. I915_WRITE(LCPLL_CTL, val);
  8492. if (intel_wait_for_register(dev_priv,
  8493. LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
  8494. 5))
  8495. DRM_ERROR("LCPLL not locked yet\n");
  8496. if (val & LCPLL_CD_SOURCE_FCLK) {
  8497. val = I915_READ(LCPLL_CTL);
  8498. val &= ~LCPLL_CD_SOURCE_FCLK;
  8499. I915_WRITE(LCPLL_CTL, val);
  8500. if (wait_for_us((I915_READ(LCPLL_CTL) &
  8501. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  8502. DRM_ERROR("Switching back to LCPLL failed\n");
  8503. }
  8504. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  8505. intel_update_cdclk(dev_priv);
  8506. }
  8507. /*
  8508. * Package states C8 and deeper are really deep PC states that can only be
  8509. * reached when all the devices on the system allow it, so even if the graphics
  8510. * device allows PC8+, it doesn't mean the system will actually get to these
  8511. * states. Our driver only allows PC8+ when going into runtime PM.
  8512. *
  8513. * The requirements for PC8+ are that all the outputs are disabled, the power
  8514. * well is disabled and most interrupts are disabled, and these are also
  8515. * requirements for runtime PM. When these conditions are met, we manually do
  8516. * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
* to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
  8518. * hang the machine.
  8519. *
  8520. * When we really reach PC8 or deeper states (not just when we allow it) we lose
  8521. * the state of some registers, so when we come back from PC8+ we need to
  8522. * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
  8523. * need to take care of the registers kept by RC6. Notice that this happens even
  8524. * if we don't put the device in PCI D3 state (which is what currently happens
  8525. * because of the runtime PM support).
  8526. *
  8527. * For more, read "Display Sequences for Package C8" on the hardware
  8528. * documentation.
  8529. */
  8530. void hsw_enable_pc8(struct drm_i915_private *dev_priv)
  8531. {
  8532. uint32_t val;
  8533. DRM_DEBUG_KMS("Enabling package C8+\n");
  8534. if (HAS_PCH_LPT_LP(dev_priv)) {
  8535. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8536. val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
  8537. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8538. }
  8539. lpt_disable_clkout_dp(dev_priv);
  8540. hsw_disable_lcpll(dev_priv, true, true);
  8541. }
  8542. void hsw_disable_pc8(struct drm_i915_private *dev_priv)
  8543. {
  8544. uint32_t val;
  8545. DRM_DEBUG_KMS("Disabling package C8+\n");
  8546. hsw_restore_lcpll(dev_priv);
  8547. lpt_init_pch_refclk(dev_priv);
  8548. if (HAS_PCH_LPT_LP(dev_priv)) {
  8549. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8550. val |= PCH_LP_PARTITION_LEVEL_DISABLE;
  8551. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8552. }
  8553. }
  8554. static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8555. {
  8556. struct drm_device *dev = old_state->dev;
  8557. struct intel_atomic_state *old_intel_state =
  8558. to_intel_atomic_state(old_state);
  8559. unsigned int req_cdclk = old_intel_state->dev_cdclk;
  8560. bxt_set_cdclk(to_i915(dev), req_cdclk);
  8561. }
  8562. static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
  8563. int pixel_rate)
  8564. {
  8565. struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
  8566. /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
  8567. if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
  8568. pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
  8569. /* BSpec says "Do not use DisplayPort with CDCLK less than
  8570. * 432 MHz, audio enabled, port width x4, and link rate
  8571. * HBR2 (5.4 GHz), or else there may be audio corruption or
  8572. * screen corruption."
  8573. */
  8574. if (intel_crtc_has_dp_encoder(crtc_state) &&
  8575. crtc_state->has_audio &&
  8576. crtc_state->port_clock >= 540000 &&
  8577. crtc_state->lane_count == 4)
  8578. pixel_rate = max(432000, pixel_rate);
  8579. return pixel_rate;
  8580. }
  8581. /* compute the max rate for new configuration */
  8582. static int ilk_max_pixel_rate(struct drm_atomic_state *state)
  8583. {
  8584. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8585. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8586. struct drm_crtc *crtc;
  8587. struct drm_crtc_state *cstate;
  8588. struct intel_crtc_state *crtc_state;
  8589. unsigned max_pixel_rate = 0, i;
  8590. enum pipe pipe;
  8591. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  8592. sizeof(intel_state->min_pixclk));
  8593. for_each_crtc_in_state(state, crtc, cstate, i) {
  8594. int pixel_rate;
  8595. crtc_state = to_intel_crtc_state(cstate);
  8596. if (!crtc_state->base.enable) {
  8597. intel_state->min_pixclk[i] = 0;
  8598. continue;
  8599. }
  8600. pixel_rate = ilk_pipe_pixel_rate(crtc_state);
  8601. if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
  8602. pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
  8603. pixel_rate);
  8604. intel_state->min_pixclk[i] = pixel_rate;
  8605. }
  8606. for_each_pipe(dev_priv, pipe)
  8607. max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
  8608. return max_pixel_rate;
  8609. }
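/*
* Reprogram the BDW cdclk: notify pcode, temporarily switch the CD
* source to Fclk, select the new LCPLL frequency, switch back and then
* update the software cdclk state.
*/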
  8610. static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
  8611. {
  8612. struct drm_i915_private *dev_priv = to_i915(dev);
  8613. uint32_t val, data;
  8614. int ret;
  8615. if (WARN((I915_READ(LCPLL_CTL) &
  8616. (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
  8617. LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
  8618. LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
  8619. LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
  8620. "trying to change cdclk frequency with cdclk not enabled\n"))
  8621. return;
  8622. mutex_lock(&dev_priv->rps.hw_lock);
  8623. ret = sandybridge_pcode_write(dev_priv,
  8624. BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
  8625. mutex_unlock(&dev_priv->rps.hw_lock);
  8626. if (ret) {
  8627. DRM_ERROR("failed to inform pcode about cdclk change\n");
  8628. return;
  8629. }
  8630. val = I915_READ(LCPLL_CTL);
  8631. val |= LCPLL_CD_SOURCE_FCLK;
  8632. I915_WRITE(LCPLL_CTL, val);
  8633. if (wait_for_us(I915_READ(LCPLL_CTL) &
  8634. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  8635. DRM_ERROR("Switching to FCLK failed\n");
  8636. val = I915_READ(LCPLL_CTL);
  8637. val &= ~LCPLL_CLK_FREQ_MASK;
  8638. switch (cdclk) {
  8639. case 450000:
  8640. val |= LCPLL_CLK_FREQ_450;
  8641. data = 0;
  8642. break;
  8643. case 540000:
  8644. val |= LCPLL_CLK_FREQ_54O_BDW;
  8645. data = 1;
  8646. break;
  8647. case 337500:
  8648. val |= LCPLL_CLK_FREQ_337_5_BDW;
  8649. data = 2;
  8650. break;
  8651. case 675000:
  8652. val |= LCPLL_CLK_FREQ_675_BDW;
  8653. data = 3;
  8654. break;
  8655. default:
  8656. WARN(1, "invalid cdclk frequency\n");
  8657. return;
  8658. }
  8659. I915_WRITE(LCPLL_CTL, val);
  8660. val = I915_READ(LCPLL_CTL);
  8661. val &= ~LCPLL_CD_SOURCE_FCLK;
  8662. I915_WRITE(LCPLL_CTL, val);
  8663. if (wait_for_us((I915_READ(LCPLL_CTL) &
  8664. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  8665. DRM_ERROR("Switching back to LCPLL failed\n");
  8666. mutex_lock(&dev_priv->rps.hw_lock);
  8667. sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
  8668. mutex_unlock(&dev_priv->rps.hw_lock);
  8669. I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
  8670. intel_update_cdclk(dev_priv);
  8671. WARN(cdclk != dev_priv->cdclk_freq,
  8672. "cdclk requested %d kHz but got %d kHz\n",
  8673. cdclk, dev_priv->cdclk_freq);
  8674. }
  8675. static int broadwell_calc_cdclk(int max_pixclk)
  8676. {
  8677. if (max_pixclk > 540000)
  8678. return 675000;
  8679. else if (max_pixclk > 450000)
  8680. return 540000;
  8681. else if (max_pixclk > 337500)
  8682. return 450000;
  8683. else
  8684. return 337500;
  8685. }
  8686. static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
  8687. {
  8688. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8689. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8690. int max_pixclk = ilk_max_pixel_rate(state);
  8691. int cdclk;
  8692. /*
  8693. * FIXME should also account for plane ratio
  8694. * once 64bpp pixel formats are supported.
  8695. */
  8696. cdclk = broadwell_calc_cdclk(max_pixclk);
  8697. if (cdclk > dev_priv->max_cdclk_freq) {
  8698. DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  8699. cdclk, dev_priv->max_cdclk_freq);
  8700. return -EINVAL;
  8701. }
  8702. intel_state->cdclk = intel_state->dev_cdclk = cdclk;
  8703. if (!intel_state->active_crtcs)
  8704. intel_state->dev_cdclk = broadwell_calc_cdclk(0);
  8705. return 0;
  8706. }
  8707. static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8708. {
  8709. struct drm_device *dev = old_state->dev;
  8710. struct intel_atomic_state *old_intel_state =
  8711. to_intel_atomic_state(old_state);
  8712. unsigned req_cdclk = old_intel_state->dev_cdclk;
  8713. broadwell_set_cdclk(dev, req_cdclk);
  8714. }
  8715. static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
  8716. {
  8717. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8718. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8719. const int max_pixclk = ilk_max_pixel_rate(state);
  8720. int vco = intel_state->cdclk_pll_vco;
  8721. int cdclk;
  8722. /*
  8723. * FIXME should also account for plane ratio
  8724. * once 64bpp pixel formats are supported.
  8725. */
  8726. cdclk = skl_calc_cdclk(max_pixclk, vco);
  8727. /*
* FIXME move the cdclk calculation to
* compute_config() so we can fail gracefully.
  8730. */
  8731. if (cdclk > dev_priv->max_cdclk_freq) {
  8732. DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  8733. cdclk, dev_priv->max_cdclk_freq);
  8734. cdclk = dev_priv->max_cdclk_freq;
  8735. }
  8736. intel_state->cdclk = intel_state->dev_cdclk = cdclk;
  8737. if (!intel_state->active_crtcs)
  8738. intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
  8739. return 0;
  8740. }
  8741. static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8742. {
  8743. struct drm_i915_private *dev_priv = to_i915(old_state->dev);
  8744. struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
  8745. unsigned int req_cdclk = intel_state->dev_cdclk;
  8746. unsigned int req_vco = intel_state->cdclk_pll_vco;
  8747. skl_set_cdclk(dev_priv, req_cdclk, req_vco);
  8748. }
  8749. static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
  8750. struct intel_crtc_state *crtc_state)
  8751. {
  8752. if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
  8753. if (!intel_ddi_pll_select(crtc, crtc_state))
  8754. return -EINVAL;
  8755. }
  8756. crtc->lowfreq_avail = false;
  8757. return 0;
  8758. }
  8759. static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
  8760. enum port port,
  8761. struct intel_crtc_state *pipe_config)
  8762. {
  8763. enum intel_dpll_id id;
  8764. switch (port) {
  8765. case PORT_A:
  8766. id = DPLL_ID_SKL_DPLL0;
  8767. break;
  8768. case PORT_B:
  8769. id = DPLL_ID_SKL_DPLL1;
  8770. break;
  8771. case PORT_C:
  8772. id = DPLL_ID_SKL_DPLL2;
  8773. break;
  8774. default:
  8775. DRM_ERROR("Incorrect port type\n");
  8776. return;
  8777. }
  8778. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8779. }
  8780. static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
  8781. enum port port,
  8782. struct intel_crtc_state *pipe_config)
  8783. {
  8784. enum intel_dpll_id id;
  8785. u32 temp;
  8786. temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
  8787. id = temp >> (port * 3 + 1);
  8788. if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
  8789. return;
  8790. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8791. }
  8792. static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
  8793. enum port port,
  8794. struct intel_crtc_state *pipe_config)
  8795. {
  8796. enum intel_dpll_id id;
  8797. uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
  8798. switch (ddi_pll_sel) {
  8799. case PORT_CLK_SEL_WRPLL1:
  8800. id = DPLL_ID_WRPLL1;
  8801. break;
  8802. case PORT_CLK_SEL_WRPLL2:
  8803. id = DPLL_ID_WRPLL2;
  8804. break;
  8805. case PORT_CLK_SEL_SPLL:
  8806. id = DPLL_ID_SPLL;
  8807. break;
  8808. case PORT_CLK_SEL_LCPLL_810:
  8809. id = DPLL_ID_LCPLL_810;
  8810. break;
  8811. case PORT_CLK_SEL_LCPLL_1350:
  8812. id = DPLL_ID_LCPLL_1350;
  8813. break;
  8814. case PORT_CLK_SEL_LCPLL_2700:
  8815. id = DPLL_ID_LCPLL_2700;
  8816. break;
  8817. default:
  8818. MISSING_CASE(ddi_pll_sel);
  8819. /* fall through */
  8820. case PORT_CLK_SEL_NONE:
  8821. return;
  8822. }
  8823. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8824. }
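/*
* Determine which transcoder (including the eDP transcoder) drives this
* pipe, take a reference on its power domain in *power_domain_mask and
* return whether that transcoder is enabled.
*/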
  8825. static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  8826. struct intel_crtc_state *pipe_config,
  8827. unsigned long *power_domain_mask)
  8828. {
  8829. struct drm_device *dev = crtc->base.dev;
  8830. struct drm_i915_private *dev_priv = to_i915(dev);
  8831. enum intel_display_power_domain power_domain;
  8832. u32 tmp;
  8833. /*
  8834. * The pipe->transcoder mapping is fixed with the exception of the eDP
  8835. * transcoder handled below.
  8836. */
  8837. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  8838. /*
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in the always-on power well).
  8841. */
  8842. tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
  8843. if (tmp & TRANS_DDI_FUNC_ENABLE) {
  8844. enum pipe trans_edp_pipe;
  8845. switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
  8846. default:
  8847. WARN(1, "unknown pipe linked to edp transcoder\n");
  8848. case TRANS_DDI_EDP_INPUT_A_ONOFF:
  8849. case TRANS_DDI_EDP_INPUT_A_ON:
  8850. trans_edp_pipe = PIPE_A;
  8851. break;
  8852. case TRANS_DDI_EDP_INPUT_B_ONOFF:
  8853. trans_edp_pipe = PIPE_B;
  8854. break;
  8855. case TRANS_DDI_EDP_INPUT_C_ONOFF:
  8856. trans_edp_pipe = PIPE_C;
  8857. break;
  8858. }
  8859. if (trans_edp_pipe == crtc->pipe)
  8860. pipe_config->cpu_transcoder = TRANSCODER_EDP;
  8861. }
  8862. power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
  8863. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8864. return false;
  8865. *power_domain_mask |= BIT(power_domain);
  8866. tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
  8867. return tmp & PIPECONF_ENABLE;
  8868. }
  8869. static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  8870. struct intel_crtc_state *pipe_config,
  8871. unsigned long *power_domain_mask)
  8872. {
  8873. struct drm_device *dev = crtc->base.dev;
  8874. struct drm_i915_private *dev_priv = to_i915(dev);
  8875. enum intel_display_power_domain power_domain;
  8876. enum port port;
  8877. enum transcoder cpu_transcoder;
  8878. u32 tmp;
  8879. for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
  8880. if (port == PORT_A)
  8881. cpu_transcoder = TRANSCODER_DSI_A;
  8882. else
  8883. cpu_transcoder = TRANSCODER_DSI_C;
  8884. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  8885. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8886. continue;
  8887. *power_domain_mask |= BIT(power_domain);
  8888. /*
  8889. * The PLL needs to be enabled with a valid divider
  8890. * configuration, otherwise accessing DSI registers will hang
  8891. * the machine. See BSpec North Display Engine
  8892. * registers/MIPI[BXT]. We can break out here early, since we
  8893. * need the same DSI PLL to be enabled for both DSI ports.
  8894. */
  8895. if (!intel_dsi_pll_is_enabled(dev_priv))
  8896. break;
  8897. /* XXX: this works for video mode only */
  8898. tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
  8899. if (!(tmp & DPI_ENABLE))
  8900. continue;
  8901. tmp = I915_READ(MIPI_CTRL(port));
  8902. if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
  8903. continue;
  8904. pipe_config->cpu_transcoder = cpu_transcoder;
  8905. break;
  8906. }
  8907. return transcoder_is_dsi(pipe_config->cpu_transcoder);
  8908. }
  8909. static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  8910. struct intel_crtc_state *pipe_config)
  8911. {
  8912. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8913. struct intel_shared_dpll *pll;
  8914. enum port port;
  8915. uint32_t tmp;
  8916. tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
  8917. port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
  8918. if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
  8919. skylake_get_ddi_pll(dev_priv, port, pipe_config);
  8920. else if (IS_BROXTON(dev_priv))
  8921. bxt_get_ddi_pll(dev_priv, port, pipe_config);
  8922. else
  8923. haswell_get_ddi_pll(dev_priv, port, pipe_config);
  8924. pll = pipe_config->shared_dpll;
  8925. if (pll) {
  8926. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  8927. &pipe_config->dpll_hw_state));
  8928. }
  8929. /*
* Haswell has only FDI/PCH transcoder A, which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
  8933. */
  8934. if (INTEL_GEN(dev_priv) < 9 &&
  8935. (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
  8936. pipe_config->has_pch_encoder = true;
  8937. tmp = I915_READ(FDI_RX_CTL(PIPE_A));
  8938. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  8939. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  8940. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  8941. }
  8942. }
  8943. static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  8944. struct intel_crtc_state *pipe_config)
  8945. {
  8946. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8947. enum intel_display_power_domain power_domain;
  8948. unsigned long power_domain_mask;
  8949. bool active;
  8950. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  8951. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8952. return false;
  8953. power_domain_mask = BIT(power_domain);
  8954. pipe_config->shared_dpll = NULL;
  8955. active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
  8956. if (IS_BROXTON(dev_priv) &&
  8957. bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
  8958. WARN_ON(active);
  8959. active = true;
  8960. }
  8961. if (!active)
  8962. goto out;
  8963. if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
  8964. haswell_get_ddi_port_state(crtc, pipe_config);
  8965. intel_get_pipe_timings(crtc, pipe_config);
  8966. }
  8967. intel_get_pipe_src_size(crtc, pipe_config);
  8968. pipe_config->gamma_mode =
  8969. I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
  8970. if (INTEL_GEN(dev_priv) >= 9) {
  8971. skl_init_scalers(dev_priv, crtc, pipe_config);
  8972. pipe_config->scaler_state.scaler_id = -1;
  8973. pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8974. }
  8975. power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
  8976. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  8977. power_domain_mask |= BIT(power_domain);
  8978. if (INTEL_GEN(dev_priv) >= 9)
  8979. skylake_get_pfit_config(crtc, pipe_config);
  8980. else
  8981. ironlake_get_pfit_config(crtc, pipe_config);
  8982. }
  8983. if (IS_HASWELL(dev_priv))
  8984. pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
  8985. (I915_READ(IPS_CTL) & IPS_ENABLE);
  8986. if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
  8987. !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
  8988. pipe_config->pixel_multiplier =
  8989. I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
  8990. } else {
  8991. pipe_config->pixel_multiplier = 1;
  8992. }
  8993. out:
  8994. for_each_power_domain(power_domain, power_domain_mask)
  8995. intel_display_power_put(dev_priv, power_domain);
  8996. return active;
  8997. }
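/*
* Program the 845g/865g cursor registers; on these chipsets the
* base/size/stride can only be modified while the cursor is disabled.
*/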
  8998. static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
  8999. const struct intel_plane_state *plane_state)
  9000. {
  9001. struct drm_device *dev = crtc->dev;
  9002. struct drm_i915_private *dev_priv = to_i915(dev);
  9003. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9004. uint32_t cntl = 0, size = 0;
  9005. if (plane_state && plane_state->base.visible) {
  9006. unsigned int width = plane_state->base.crtc_w;
  9007. unsigned int height = plane_state->base.crtc_h;
  9008. unsigned int stride = roundup_pow_of_two(width) * 4;
  9009. switch (stride) {
  9010. default:
  9011. WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
  9012. width, stride);
  9013. stride = 256;
  9014. /* fallthrough */
  9015. case 256:
  9016. case 512:
  9017. case 1024:
  9018. case 2048:
  9019. break;
  9020. }
  9021. cntl |= CURSOR_ENABLE |
  9022. CURSOR_GAMMA_ENABLE |
  9023. CURSOR_FORMAT_ARGB |
  9024. CURSOR_STRIDE(stride);
  9025. size = (height << 12) | width;
  9026. }
  9027. if (intel_crtc->cursor_cntl != 0 &&
  9028. (intel_crtc->cursor_base != base ||
  9029. intel_crtc->cursor_size != size ||
  9030. intel_crtc->cursor_cntl != cntl)) {
  9031. /* On these chipsets we can only modify the base/size/stride
  9032. * whilst the cursor is disabled.
  9033. */
  9034. I915_WRITE(CURCNTR(PIPE_A), 0);
  9035. POSTING_READ(CURCNTR(PIPE_A));
  9036. intel_crtc->cursor_cntl = 0;
  9037. }
  9038. if (intel_crtc->cursor_base != base) {
  9039. I915_WRITE(CURBASE(PIPE_A), base);
  9040. intel_crtc->cursor_base = base;
  9041. }
  9042. if (intel_crtc->cursor_size != size) {
  9043. I915_WRITE(CURSIZE, size);
  9044. intel_crtc->cursor_size = size;
  9045. }
  9046. if (intel_crtc->cursor_cntl != cntl) {
  9047. I915_WRITE(CURCNTR(PIPE_A), cntl);
  9048. POSTING_READ(CURCNTR(PIPE_A));
  9049. intel_crtc->cursor_cntl = cntl;
  9050. }
  9051. }
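/*
* Program the i9xx-style cursor control and base registers; only the
* square 64/128/256 ARGB cursor modes are handled here.
*/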
  9052. static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
  9053. const struct intel_plane_state *plane_state)
  9054. {
  9055. struct drm_device *dev = crtc->dev;
  9056. struct drm_i915_private *dev_priv = to_i915(dev);
  9057. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9058. int pipe = intel_crtc->pipe;
  9059. uint32_t cntl = 0;
  9060. if (plane_state && plane_state->base.visible) {
  9061. cntl = MCURSOR_GAMMA_ENABLE;
  9062. switch (plane_state->base.crtc_w) {
  9063. case 64:
  9064. cntl |= CURSOR_MODE_64_ARGB_AX;
  9065. break;
  9066. case 128:
  9067. cntl |= CURSOR_MODE_128_ARGB_AX;
  9068. break;
  9069. case 256:
  9070. cntl |= CURSOR_MODE_256_ARGB_AX;
  9071. break;
  9072. default:
  9073. MISSING_CASE(plane_state->base.crtc_w);
  9074. return;
  9075. }
  9076. cntl |= pipe << 28; /* Connect to correct pipe */
  9077. if (HAS_DDI(dev_priv))
  9078. cntl |= CURSOR_PIPE_CSC_ENABLE;
  9079. if (plane_state->base.rotation & DRM_ROTATE_180)
  9080. cntl |= CURSOR_ROTATE_180;
  9081. }
  9082. if (intel_crtc->cursor_cntl != cntl) {
  9083. I915_WRITE(CURCNTR(pipe), cntl);
  9084. POSTING_READ(CURCNTR(pipe));
  9085. intel_crtc->cursor_cntl = cntl;
  9086. }
  9087. /* and commit changes on next vblank */
  9088. I915_WRITE(CURBASE(pipe), base);
  9089. POSTING_READ(CURBASE(pipe));
  9090. intel_crtc->cursor_base = base;
  9091. }
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
  9093. static void intel_crtc_update_cursor(struct drm_crtc *crtc,
  9094. const struct intel_plane_state *plane_state)
  9095. {
  9096. struct drm_device *dev = crtc->dev;
  9097. struct drm_i915_private *dev_priv = to_i915(dev);
  9098. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9099. int pipe = intel_crtc->pipe;
  9100. u32 base = intel_crtc->cursor_addr;
  9101. u32 pos = 0;
  9102. if (plane_state) {
  9103. int x = plane_state->base.crtc_x;
  9104. int y = plane_state->base.crtc_y;
  9105. if (x < 0) {
  9106. pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
  9107. x = -x;
  9108. }
  9109. pos |= x << CURSOR_X_SHIFT;
  9110. if (y < 0) {
  9111. pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
  9112. y = -y;
  9113. }
  9114. pos |= y << CURSOR_Y_SHIFT;
  9115. /* ILK+ do this automagically */
  9116. if (HAS_GMCH_DISPLAY(dev_priv) &&
  9117. plane_state->base.rotation & DRM_ROTATE_180) {
  9118. base += (plane_state->base.crtc_h *
  9119. plane_state->base.crtc_w - 1) * 4;
  9120. }
  9121. }
  9122. I915_WRITE(CURPOS(pipe), pos);
  9123. if (IS_845G(dev_priv) || IS_I865G(dev_priv))
  9124. i845_update_cursor(crtc, base, plane_state);
  9125. else
  9126. i9xx_update_cursor(crtc, base, plane_state);
  9127. }
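/*
* Check whether the requested cursor dimensions can be programmed on
* this platform (845g/865g allow wide non-square cursors, everything
* else needs square power-of-two sizes).
*/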
  9128. static bool cursor_size_ok(struct drm_i915_private *dev_priv,
  9129. uint32_t width, uint32_t height)
  9130. {
  9131. if (width == 0 || height == 0)
  9132. return false;
  9133. /*
* 845g/865g are special in that they are only limited by
* the width of their cursors; the height is arbitrary up to
  9136. * the precision of the register. Everything else requires
  9137. * square cursors, limited to a few power-of-two sizes.
  9138. */
  9139. if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
  9140. if ((width & 63) != 0)
  9141. return false;
  9142. if (width > (IS_845G(dev_priv) ? 64 : 512))
  9143. return false;
  9144. if (height > 1023)
  9145. return false;
  9146. } else {
  9147. switch (width | height) {
  9148. case 256:
  9149. case 128:
  9150. if (IS_GEN2(dev_priv))
  9151. return false;
  9152. case 64:
  9153. break;
  9154. default:
  9155. return false;
  9156. }
  9157. }
  9158. return true;
  9159. }
  9160. /* VESA 640x480x72Hz mode to set on the pipe */
  9161. static struct drm_display_mode load_detect_mode = {
  9162. DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
  9163. 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
  9164. };
  9165. struct drm_framebuffer *
  9166. __intel_framebuffer_create(struct drm_device *dev,
  9167. struct drm_mode_fb_cmd2 *mode_cmd,
  9168. struct drm_i915_gem_object *obj)
  9169. {
  9170. struct intel_framebuffer *intel_fb;
  9171. int ret;
  9172. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  9173. if (!intel_fb)
  9174. return ERR_PTR(-ENOMEM);
  9175. ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
  9176. if (ret)
  9177. goto err;
  9178. return &intel_fb->base;
  9179. err:
  9180. kfree(intel_fb);
  9181. return ERR_PTR(ret);
  9182. }
  9183. static struct drm_framebuffer *
  9184. intel_framebuffer_create(struct drm_device *dev,
  9185. struct drm_mode_fb_cmd2 *mode_cmd,
  9186. struct drm_i915_gem_object *obj)
  9187. {
  9188. struct drm_framebuffer *fb;
  9189. int ret;
  9190. ret = i915_mutex_lock_interruptible(dev);
  9191. if (ret)
  9192. return ERR_PTR(ret);
  9193. fb = __intel_framebuffer_create(dev, mode_cmd, obj);
  9194. mutex_unlock(&dev->struct_mutex);
  9195. return fb;
  9196. }
  9197. static u32
  9198. intel_framebuffer_pitch_for_width(int width, int bpp)
  9199. {
  9200. u32 pitch = DIV_ROUND_UP(width * bpp, 8);
  9201. return ALIGN(pitch, 64);
  9202. }
  9203. static u32
  9204. intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
  9205. {
  9206. u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
  9207. return PAGE_ALIGN(pitch * mode->vdisplay);
  9208. }
  9209. static struct drm_framebuffer *
  9210. intel_framebuffer_create_for_mode(struct drm_device *dev,
  9211. struct drm_display_mode *mode,
  9212. int depth, int bpp)
  9213. {
  9214. struct drm_framebuffer *fb;
  9215. struct drm_i915_gem_object *obj;
  9216. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  9217. obj = i915_gem_object_create(to_i915(dev),
  9218. intel_framebuffer_size_for_mode(mode, bpp));
  9219. if (IS_ERR(obj))
  9220. return ERR_CAST(obj);
  9221. mode_cmd.width = mode->hdisplay;
  9222. mode_cmd.height = mode->vdisplay;
  9223. mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
  9224. bpp);
  9225. mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
  9226. fb = intel_framebuffer_create(dev, &mode_cmd, obj);
  9227. if (IS_ERR(fb))
  9228. i915_gem_object_put(obj);
  9229. return fb;
  9230. }
  9231. static struct drm_framebuffer *
  9232. mode_fits_in_fbdev(struct drm_device *dev,
  9233. struct drm_display_mode *mode)
  9234. {
  9235. #ifdef CONFIG_DRM_FBDEV_EMULATION
  9236. struct drm_i915_private *dev_priv = to_i915(dev);
  9237. struct drm_i915_gem_object *obj;
  9238. struct drm_framebuffer *fb;
  9239. if (!dev_priv->fbdev)
  9240. return NULL;
  9241. if (!dev_priv->fbdev->fb)
  9242. return NULL;
  9243. obj = dev_priv->fbdev->fb->obj;
  9244. BUG_ON(!obj);
  9245. fb = &dev_priv->fbdev->fb->base;
  9246. if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
  9247. fb->bits_per_pixel))
  9248. return NULL;
  9249. if (obj->base.size < mode->vdisplay * fb->pitches[0])
  9250. return NULL;
  9251. drm_framebuffer_reference(fb);
  9252. return fb;
  9253. #else
  9254. return NULL;
  9255. #endif
  9256. }
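/*
* Set up the primary plane state for the load-detect modeset: attach (or
* detach) the framebuffer and derive the plane geometry from the given
* mode.
*/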
  9257. static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
  9258. struct drm_crtc *crtc,
  9259. struct drm_display_mode *mode,
  9260. struct drm_framebuffer *fb,
  9261. int x, int y)
  9262. {
  9263. struct drm_plane_state *plane_state;
  9264. int hdisplay, vdisplay;
  9265. int ret;
  9266. plane_state = drm_atomic_get_plane_state(state, crtc->primary);
  9267. if (IS_ERR(plane_state))
  9268. return PTR_ERR(plane_state);
  9269. if (mode)
  9270. drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
  9271. else
  9272. hdisplay = vdisplay = 0;
  9273. ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
  9274. if (ret)
  9275. return ret;
  9276. drm_atomic_set_fb_for_plane(plane_state, fb);
  9277. plane_state->crtc_x = 0;
  9278. plane_state->crtc_y = 0;
  9279. plane_state->crtc_w = hdisplay;
  9280. plane_state->crtc_h = vdisplay;
  9281. plane_state->src_x = x << 16;
  9282. plane_state->src_y = y << 16;
  9283. plane_state->src_w = hdisplay << 16;
  9284. plane_state->src_h = vdisplay << 16;
  9285. return 0;
  9286. }
  9287. bool intel_get_load_detect_pipe(struct drm_connector *connector,
  9288. struct drm_display_mode *mode,
  9289. struct intel_load_detect_pipe *old,
  9290. struct drm_modeset_acquire_ctx *ctx)
  9291. {
  9292. struct intel_crtc *intel_crtc;
  9293. struct intel_encoder *intel_encoder =
  9294. intel_attached_encoder(connector);
  9295. struct drm_crtc *possible_crtc;
  9296. struct drm_encoder *encoder = &intel_encoder->base;
  9297. struct drm_crtc *crtc = NULL;
  9298. struct drm_device *dev = encoder->dev;
  9299. struct drm_i915_private *dev_priv = to_i915(dev);
  9300. struct drm_framebuffer *fb;
  9301. struct drm_mode_config *config = &dev->mode_config;
  9302. struct drm_atomic_state *state = NULL, *restore_state = NULL;
  9303. struct drm_connector_state *connector_state;
  9304. struct intel_crtc_state *crtc_state;
  9305. int ret, i = -1;
  9306. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  9307. connector->base.id, connector->name,
  9308. encoder->base.id, encoder->name);
  9309. old->restore_state = NULL;
  9310. retry:
  9311. ret = drm_modeset_lock(&config->connection_mutex, ctx);
  9312. if (ret)
  9313. goto fail;
  9314. /*
  9315. * Algorithm gets a little messy:
  9316. *
  9317. * - if the connector already has an assigned crtc, use it (but make
  9318. * sure it's on first)
  9319. *
  9320. * - try to find the first unused crtc that can drive this connector,
  9321. * and use that if we find one
  9322. */
  9323. /* See if we already have a CRTC for this connector */
  9324. if (connector->state->crtc) {
  9325. crtc = connector->state->crtc;
  9326. ret = drm_modeset_lock(&crtc->mutex, ctx);
  9327. if (ret)
  9328. goto fail;
  9329. /* Make sure the crtc and connector are running */
  9330. goto found;
  9331. }
  9332. /* Find an unused one (if possible) */
  9333. for_each_crtc(dev, possible_crtc) {
  9334. i++;
  9335. if (!(encoder->possible_crtcs & (1 << i)))
  9336. continue;
  9337. ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
  9338. if (ret)
  9339. goto fail;
  9340. if (possible_crtc->state->enable) {
  9341. drm_modeset_unlock(&possible_crtc->mutex);
  9342. continue;
  9343. }
  9344. crtc = possible_crtc;
  9345. break;
  9346. }
  9347. /*
  9348. * If we didn't find an unused CRTC, don't use any.
  9349. */
  9350. if (!crtc) {
  9351. DRM_DEBUG_KMS("no pipe available for load-detect\n");
  9352. goto fail;
  9353. }
  9354. found:
  9355. intel_crtc = to_intel_crtc(crtc);
  9356. ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
  9357. if (ret)
  9358. goto fail;
  9359. state = drm_atomic_state_alloc(dev);
  9360. restore_state = drm_atomic_state_alloc(dev);
  9361. if (!state || !restore_state) {
  9362. ret = -ENOMEM;
  9363. goto fail;
  9364. }
  9365. state->acquire_ctx = ctx;
  9366. restore_state->acquire_ctx = ctx;
  9367. connector_state = drm_atomic_get_connector_state(state, connector);
  9368. if (IS_ERR(connector_state)) {
  9369. ret = PTR_ERR(connector_state);
  9370. goto fail;
  9371. }
  9372. ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
  9373. if (ret)
  9374. goto fail;
  9375. crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
  9376. if (IS_ERR(crtc_state)) {
  9377. ret = PTR_ERR(crtc_state);
  9378. goto fail;
  9379. }
  9380. crtc_state->base.active = crtc_state->base.enable = true;
  9381. if (!mode)
  9382. mode = &load_detect_mode;
  9383. /* We need a framebuffer large enough to accommodate all accesses
  9384. * that the plane may generate whilst we perform load detection.
* We cannot rely on the fbcon either being present (we get called
  9386. * during its initialisation to detect all boot displays, or it may
  9387. * not even exist) or that it is large enough to satisfy the
  9388. * requested mode.
  9389. */
  9390. fb = mode_fits_in_fbdev(dev, mode);
  9391. if (fb == NULL) {
  9392. DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
  9393. fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
  9394. } else
  9395. DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
  9396. if (IS_ERR(fb)) {
  9397. DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
  9398. goto fail;
  9399. }
  9400. ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
  9401. if (ret)
  9402. goto fail;
  9403. drm_framebuffer_unreference(fb);
  9404. ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
  9405. if (ret)
  9406. goto fail;
  9407. ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
  9408. if (!ret)
  9409. ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
  9410. if (!ret)
  9411. ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
  9412. if (ret) {
  9413. DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
  9414. goto fail;
  9415. }
  9416. ret = drm_atomic_commit(state);
  9417. if (ret) {
  9418. DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
  9419. goto fail;
  9420. }
  9421. old->restore_state = restore_state;
  9422. /* let the connector get through one full cycle before testing */
  9423. intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
  9424. return true;
  9425. fail:
  9426. if (state) {
  9427. drm_atomic_state_put(state);
  9428. state = NULL;
  9429. }
  9430. if (restore_state) {
  9431. drm_atomic_state_put(restore_state);
  9432. restore_state = NULL;
  9433. }
  9434. if (ret == -EDEADLK) {
  9435. drm_modeset_backoff(ctx);
  9436. goto retry;
  9437. }
  9438. return false;
  9439. }
  9440. void intel_release_load_detect_pipe(struct drm_connector *connector,
  9441. struct intel_load_detect_pipe *old,
  9442. struct drm_modeset_acquire_ctx *ctx)
  9443. {
  9444. struct intel_encoder *intel_encoder =
  9445. intel_attached_encoder(connector);
  9446. struct drm_encoder *encoder = &intel_encoder->base;
  9447. struct drm_atomic_state *state = old->restore_state;
  9448. int ret;
  9449. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  9450. connector->base.id, connector->name,
  9451. encoder->base.id, encoder->name);
  9452. if (!state)
  9453. return;
  9454. ret = drm_atomic_commit(state);
  9455. if (ret)
  9456. DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
  9457. drm_atomic_state_put(state);
  9458. }
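/*
* Return the DPLL reference clock in kHz for the given pipe config,
* based on SSC usage, PCH split and hardware generation.
*/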
  9459. static int i9xx_pll_refclk(struct drm_device *dev,
  9460. const struct intel_crtc_state *pipe_config)
  9461. {
  9462. struct drm_i915_private *dev_priv = to_i915(dev);
  9463. u32 dpll = pipe_config->dpll_hw_state.dpll;
  9464. if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
  9465. return dev_priv->vbt.lvds_ssc_freq;
  9466. else if (HAS_PCH_SPLIT(dev_priv))
  9467. return 120000;
  9468. else if (!IS_GEN2(dev_priv))
  9469. return 96000;
  9470. else
  9471. return 48000;
  9472. }
  9473. /* Returns the clock of the currently programmed mode of the given pipe. */
  9474. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  9475. struct intel_crtc_state *pipe_config)
  9476. {
  9477. struct drm_device *dev = crtc->base.dev;
  9478. struct drm_i915_private *dev_priv = to_i915(dev);
  9479. int pipe = pipe_config->cpu_transcoder;
  9480. u32 dpll = pipe_config->dpll_hw_state.dpll;
  9481. u32 fp;
  9482. struct dpll clock;
  9483. int port_clock;
  9484. int refclk = i9xx_pll_refclk(dev, pipe_config);
  9485. if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
  9486. fp = pipe_config->dpll_hw_state.fp0;
  9487. else
  9488. fp = pipe_config->dpll_hw_state.fp1;
  9489. clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
  9490. if (IS_PINEVIEW(dev_priv)) {
  9491. clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
  9492. clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
  9493. } else {
  9494. clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
  9495. clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
  9496. }
  9497. if (!IS_GEN2(dev_priv)) {
  9498. if (IS_PINEVIEW(dev_priv))
  9499. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
  9500. DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
  9501. else
  9502. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
  9503. DPLL_FPA01_P1_POST_DIV_SHIFT);
  9504. switch (dpll & DPLL_MODE_MASK) {
  9505. case DPLLB_MODE_DAC_SERIAL:
  9506. clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
  9507. 5 : 10;
  9508. break;
  9509. case DPLLB_MODE_LVDS:
  9510. clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
  9511. 7 : 14;
  9512. break;
  9513. default:
  9514. DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
  9515. "mode\n", (int)(dpll & DPLL_MODE_MASK));
  9516. return;
  9517. }
  9518. if (IS_PINEVIEW(dev_priv))
  9519. port_clock = pnv_calc_dpll_params(refclk, &clock);
  9520. else
  9521. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  9522. } else {
  9523. u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
  9524. bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
  9525. if (is_lvds) {
  9526. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
  9527. DPLL_FPA01_P1_POST_DIV_SHIFT);
  9528. if (lvds & LVDS_CLKB_POWER_UP)
  9529. clock.p2 = 7;
  9530. else
  9531. clock.p2 = 14;
  9532. } else {
  9533. if (dpll & PLL_P1_DIVIDE_BY_TWO)
  9534. clock.p1 = 2;
  9535. else {
  9536. clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
  9537. DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
  9538. }
  9539. if (dpll & PLL_P2_DIVIDE_BY_4)
  9540. clock.p2 = 4;
  9541. else
  9542. clock.p2 = 2;
  9543. }
  9544. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  9545. }
  9546. /*
  9547. * This value includes pixel_multiplier. We will use
  9548. * port_clock to compute adjusted_mode.crtc_clock in the
  9549. * encoder's get_config() function.
  9550. */
  9551. pipe_config->port_clock = port_clock;
  9552. }
  9553. int intel_dotclock_calculate(int link_freq,
  9554. const struct intel_link_m_n *m_n)
  9555. {
/*
* The calculation for the data clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
* and the link clock is simpler:
* link_clock = (m * link_freq) / n
*/
  9565. if (!m_n->link_n)
  9566. return 0;
  9567. return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
  9568. }
  9569. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  9570. struct intel_crtc_state *pipe_config)
  9571. {
  9572. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  9573. /* read out port_clock from the DPLL */
  9574. i9xx_crtc_clock_get(crtc, pipe_config);
  9575. /*
  9576. * In case there is an active pipe without active ports,
  9577. * we may need some idea for the dotclock anyway.
  9578. * Calculate one based on the FDI configuration.
  9579. */
  9580. pipe_config->base.adjusted_mode.crtc_clock =
  9581. intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  9582. &pipe_config->fdi_m_n);
  9583. }
  9584. /** Returns the currently programmed mode of the given pipe. */
  9585. struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
  9586. struct drm_crtc *crtc)
  9587. {
  9588. struct drm_i915_private *dev_priv = to_i915(dev);
  9589. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9590. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  9591. struct drm_display_mode *mode;
  9592. struct intel_crtc_state *pipe_config;
  9593. int htot = I915_READ(HTOTAL(cpu_transcoder));
  9594. int hsync = I915_READ(HSYNC(cpu_transcoder));
  9595. int vtot = I915_READ(VTOTAL(cpu_transcoder));
  9596. int vsync = I915_READ(VSYNC(cpu_transcoder));
  9597. enum pipe pipe = intel_crtc->pipe;
  9598. mode = kzalloc(sizeof(*mode), GFP_KERNEL);
  9599. if (!mode)
  9600. return NULL;
  9601. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  9602. if (!pipe_config) {
  9603. kfree(mode);
  9604. return NULL;
  9605. }
  9606. /*
  9607. * Construct a pipe_config sufficient for getting the clock info
  9608. * back out of crtc_clock_get.
  9609. *
  9610. * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
  9611. * to use a real value here instead.
  9612. */
  9613. pipe_config->cpu_transcoder = (enum transcoder) pipe;
  9614. pipe_config->pixel_multiplier = 1;
  9615. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
  9616. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
  9617. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
  9618. i9xx_crtc_clock_get(intel_crtc, pipe_config);
  9619. mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
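/*
 * The timing registers hold each value minus one (low 16 bits are the
 * active/sync-start value, high 16 bits the total/sync-end value),
 * hence the +1 when decoding below.
 */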
  9620. mode->hdisplay = (htot & 0xffff) + 1;
  9621. mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
  9622. mode->hsync_start = (hsync & 0xffff) + 1;
  9623. mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
  9624. mode->vdisplay = (vtot & 0xffff) + 1;
  9625. mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
  9626. mode->vsync_start = (vsync & 0xffff) + 1;
  9627. mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
  9628. drm_mode_set_name(mode);
  9629. kfree(pipe_config);
  9630. return mode;
  9631. }
  9632. static void intel_crtc_destroy(struct drm_crtc *crtc)
  9633. {
  9634. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9635. struct drm_device *dev = crtc->dev;
  9636. struct intel_flip_work *work;
  9637. spin_lock_irq(&dev->event_lock);
  9638. work = intel_crtc->flip_work;
  9639. intel_crtc->flip_work = NULL;
  9640. spin_unlock_irq(&dev->event_lock);
  9641. if (work) {
  9642. cancel_work_sync(&work->mmio_work);
  9643. cancel_work_sync(&work->unpin_work);
  9644. kfree(work);
  9645. }
  9646. drm_crtc_cleanup(crtc);
  9647. kfree(intel_crtc);
  9648. }
  9649. static void intel_unpin_work_fn(struct work_struct *__work)
  9650. {
  9651. struct intel_flip_work *work =
  9652. container_of(__work, struct intel_flip_work, unpin_work);
  9653. struct intel_crtc *crtc = to_intel_crtc(work->crtc);
  9654. struct drm_device *dev = crtc->base.dev;
  9655. struct drm_plane *primary = crtc->base.primary;
  9656. if (is_mmio_work(work))
  9657. flush_work(&work->mmio_work);
  9658. mutex_lock(&dev->struct_mutex);
  9659. intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
  9660. i915_gem_object_put(work->pending_flip_obj);
  9661. mutex_unlock(&dev->struct_mutex);
  9662. i915_gem_request_put(work->flip_queued_req);
  9663. intel_frontbuffer_flip_complete(to_i915(dev),
  9664. to_intel_plane(primary)->frontbuffer_bit);
  9665. intel_fbc_post_update(crtc);
  9666. drm_framebuffer_unreference(work->old_fb);
  9667. BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
  9668. atomic_dec(&crtc->unpin_work_count);
  9669. kfree(work);
  9670. }
  9671. /* Is 'a' after or equal to 'b'? */
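/*
 * The subtraction keeps this correct across counter wraparound, e.g.
 * a = 2, b = 0xfffffffe: a - b = 4, top bit clear, so 'a' still counts
 * as being after 'b'.
 */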
  9672. static bool g4x_flip_count_after_eq(u32 a, u32 b)
  9673. {
  9674. return !((a - b) & 0x80000000);
  9675. }
  9676. static bool __pageflip_finished_cs(struct intel_crtc *crtc,
  9677. struct intel_flip_work *work)
  9678. {
  9679. struct drm_device *dev = crtc->base.dev;
  9680. struct drm_i915_private *dev_priv = to_i915(dev);
  9681. if (abort_flip_on_reset(crtc))
  9682. return true;
  9683. /*
9684. * The relevant registers don't exist on pre-ctg.
  9685. * As the flip done interrupt doesn't trigger for mmio
  9686. * flips on gmch platforms, a flip count check isn't
  9687. * really needed there. But since ctg has the registers,
  9688. * include it in the check anyway.
  9689. */
  9690. if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
  9691. return true;
  9692. /*
  9693. * BDW signals flip done immediately if the plane
  9694. * is disabled, even if the plane enable is already
  9695. * armed to occur at the next vblank :(
  9696. */
  9697. /*
  9698. * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
  9699. * used the same base address. In that case the mmio flip might
  9700. * have completed, but the CS hasn't even executed the flip yet.
  9701. *
  9702. * A flip count check isn't enough as the CS might have updated
  9703. * the base address just after start of vblank, but before we
  9704. * managed to process the interrupt. This means we'd complete the
  9705. * CS flip too soon.
  9706. *
  9707. * Combining both checks should get us a good enough result. It may
  9708. * still happen that the CS flip has been executed, but has not
  9709. * yet actually completed. But in case the base address is the same
  9710. * anyway, we don't really care.
  9711. */
  9712. return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
  9713. crtc->flip_work->gtt_offset &&
  9714. g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
  9715. crtc->flip_work->flip_count);
  9716. }
  9717. static bool
  9718. __pageflip_finished_mmio(struct intel_crtc *crtc,
  9719. struct intel_flip_work *work)
  9720. {
  9721. /*
  9722. * MMIO work completes when vblank is different from
  9723. * flip_queued_vblank.
  9724. *
9725. * The reset counter value doesn't matter; resets are handled by
9726. * i915_wait_request finishing early, so there is no need to handle
9727. * them here.
  9728. */
  9729. return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
  9730. }
  9731. static bool pageflip_finished(struct intel_crtc *crtc,
  9732. struct intel_flip_work *work)
  9733. {
  9734. if (!atomic_read(&work->pending))
  9735. return false;
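/*
 * Pairs with the barrier in intel_mark_page_flip_active(): once
 * 'pending' is observed set, the rest of the work item is visible too.
 */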
  9736. smp_rmb();
  9737. if (is_mmio_work(work))
  9738. return __pageflip_finished_mmio(crtc, work);
  9739. else
  9740. return __pageflip_finished_cs(crtc, work);
  9741. }
  9742. void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
  9743. {
  9744. struct drm_device *dev = &dev_priv->drm;
  9745. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  9746. struct intel_flip_work *work;
  9747. unsigned long flags;
  9748. /* Ignore early vblank irqs */
  9749. if (!crtc)
  9750. return;
  9751. /*
  9752. * This is called both by irq handlers and the reset code (to complete
  9753. * lost pageflips) so needs the full irqsave spinlocks.
  9754. */
  9755. spin_lock_irqsave(&dev->event_lock, flags);
  9756. work = crtc->flip_work;
  9757. if (work != NULL &&
  9758. !is_mmio_work(work) &&
  9759. pageflip_finished(crtc, work))
  9760. page_flip_completed(crtc);
  9761. spin_unlock_irqrestore(&dev->event_lock, flags);
  9762. }
  9763. void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
  9764. {
  9765. struct drm_device *dev = &dev_priv->drm;
  9766. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  9767. struct intel_flip_work *work;
  9768. unsigned long flags;
  9769. /* Ignore early vblank irqs */
  9770. if (!crtc)
  9771. return;
  9772. /*
  9773. * This is called both by irq handlers and the reset code (to complete
  9774. * lost pageflips) so needs the full irqsave spinlocks.
  9775. */
  9776. spin_lock_irqsave(&dev->event_lock, flags);
  9777. work = crtc->flip_work;
  9778. if (work != NULL &&
  9779. is_mmio_work(work) &&
  9780. pageflip_finished(crtc, work))
  9781. page_flip_completed(crtc);
  9782. spin_unlock_irqrestore(&dev->event_lock, flags);
  9783. }
  9784. static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
  9785. struct intel_flip_work *work)
  9786. {
  9787. work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
  9788. /* Ensure that the work item is consistent when activating it ... */
  9789. smp_mb__before_atomic();
  9790. atomic_set(&work->pending, 1);
  9791. }
  9792. static int intel_gen2_queue_flip(struct drm_device *dev,
  9793. struct drm_crtc *crtc,
  9794. struct drm_framebuffer *fb,
  9795. struct drm_i915_gem_object *obj,
  9796. struct drm_i915_gem_request *req,
  9797. uint32_t flags)
  9798. {
  9799. struct intel_ring *ring = req->ring;
  9800. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9801. u32 flip_mask;
  9802. int ret;
  9803. ret = intel_ring_begin(req, 6);
  9804. if (ret)
  9805. return ret;
  9806. /* Can't queue multiple flips, so wait for the previous
  9807. * one to finish before executing the next.
  9808. */
  9809. if (intel_crtc->plane)
  9810. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9811. else
  9812. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9813. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  9814. intel_ring_emit(ring, MI_NOOP);
  9815. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9816. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9817. intel_ring_emit(ring, fb->pitches[0]);
  9818. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9819. intel_ring_emit(ring, 0); /* aux display base address, unused */
  9820. return 0;
  9821. }
  9822. static int intel_gen3_queue_flip(struct drm_device *dev,
  9823. struct drm_crtc *crtc,
  9824. struct drm_framebuffer *fb,
  9825. struct drm_i915_gem_object *obj,
  9826. struct drm_i915_gem_request *req,
  9827. uint32_t flags)
  9828. {
  9829. struct intel_ring *ring = req->ring;
  9830. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9831. u32 flip_mask;
  9832. int ret;
  9833. ret = intel_ring_begin(req, 6);
  9834. if (ret)
  9835. return ret;
  9836. if (intel_crtc->plane)
  9837. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9838. else
  9839. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9840. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  9841. intel_ring_emit(ring, MI_NOOP);
  9842. intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
  9843. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9844. intel_ring_emit(ring, fb->pitches[0]);
  9845. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9846. intel_ring_emit(ring, MI_NOOP);
  9847. return 0;
  9848. }
  9849. static int intel_gen4_queue_flip(struct drm_device *dev,
  9850. struct drm_crtc *crtc,
  9851. struct drm_framebuffer *fb,
  9852. struct drm_i915_gem_object *obj,
  9853. struct drm_i915_gem_request *req,
  9854. uint32_t flags)
  9855. {
  9856. struct intel_ring *ring = req->ring;
  9857. struct drm_i915_private *dev_priv = to_i915(dev);
  9858. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9859. uint32_t pf, pipesrc;
  9860. int ret;
  9861. ret = intel_ring_begin(req, 4);
  9862. if (ret)
  9863. return ret;
  9864. /* i965+ uses the linear or tiled offsets from the
  9865. * Display Registers (which do not change across a page-flip)
  9866. * so we need only reprogram the base address.
  9867. */
  9868. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9869. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9870. intel_ring_emit(ring, fb->pitches[0]);
  9871. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
  9872. intel_fb_modifier_to_tiling(fb->modifier[0]));
  9873. /* XXX Enabling the panel-fitter across page-flip is so far
  9874. * untested on non-native modes, so ignore it for now.
  9875. * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
  9876. */
  9877. pf = 0;
  9878. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9879. intel_ring_emit(ring, pf | pipesrc);
  9880. return 0;
  9881. }
  9882. static int intel_gen6_queue_flip(struct drm_device *dev,
  9883. struct drm_crtc *crtc,
  9884. struct drm_framebuffer *fb,
  9885. struct drm_i915_gem_object *obj,
  9886. struct drm_i915_gem_request *req,
  9887. uint32_t flags)
  9888. {
  9889. struct intel_ring *ring = req->ring;
  9890. struct drm_i915_private *dev_priv = to_i915(dev);
  9891. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9892. uint32_t pf, pipesrc;
  9893. int ret;
  9894. ret = intel_ring_begin(req, 4);
  9895. if (ret)
  9896. return ret;
  9897. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9898. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9899. intel_ring_emit(ring, fb->pitches[0] |
  9900. intel_fb_modifier_to_tiling(fb->modifier[0]));
  9901. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9902. /* Contrary to the suggestions in the documentation,
  9903. * "Enable Panel Fitter" does not seem to be required when page
  9904. * flipping with a non-native mode, and worse causes a normal
  9905. * modeset to fail.
  9906. * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
  9907. */
  9908. pf = 0;
  9909. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9910. intel_ring_emit(ring, pf | pipesrc);
  9911. return 0;
  9912. }
  9913. static int intel_gen7_queue_flip(struct drm_device *dev,
  9914. struct drm_crtc *crtc,
  9915. struct drm_framebuffer *fb,
  9916. struct drm_i915_gem_object *obj,
  9917. struct drm_i915_gem_request *req,
  9918. uint32_t flags)
  9919. {
  9920. struct drm_i915_private *dev_priv = to_i915(dev);
  9921. struct intel_ring *ring = req->ring;
  9922. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9923. uint32_t plane_bit = 0;
  9924. int len, ret;
  9925. switch (intel_crtc->plane) {
  9926. case PLANE_A:
  9927. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
  9928. break;
  9929. case PLANE_B:
  9930. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
  9931. break;
  9932. case PLANE_C:
  9933. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
  9934. break;
  9935. default:
  9936. WARN_ONCE(1, "unknown plane in flip command\n");
  9937. return -ENODEV;
  9938. }
  9939. len = 4;
  9940. if (req->engine->id == RCS) {
  9941. len += 6;
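/*
 * The six extra dwords are the DERRMR LRI (3) and the DERRMR SRM (3)
 * emitted below for the flip-done unmasking workaround.
 */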
  9942. /*
9943. * On Gen 8, SRM takes an extra dword to accommodate 48-bit
9944. * addresses, and we need a NOOP for the batch size to
9945. * stay even.
  9946. */
  9947. if (IS_GEN8(dev_priv))
  9948. len += 2;
  9949. }
  9950. /*
  9951. * BSpec MI_DISPLAY_FLIP for IVB:
  9952. * "The full packet must be contained within the same cache line."
  9953. *
  9954. * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
  9955. * cacheline, if we ever start emitting more commands before
  9956. * the MI_DISPLAY_FLIP we may need to first emit everything else,
  9957. * then do the cacheline alignment, and finally emit the
  9958. * MI_DISPLAY_FLIP.
  9959. */
  9960. ret = intel_ring_cacheline_align(req);
  9961. if (ret)
  9962. return ret;
  9963. ret = intel_ring_begin(req, len);
  9964. if (ret)
  9965. return ret;
  9966. /* Unmask the flip-done completion message. Note that the bspec says that
  9967. * we should do this for both the BCS and RCS, and that we must not unmask
9968. * more than one flip event at any time (or ensure that only one flip
9969. * message can be sent by waiting for flip-done prior to queueing new flips).
  9970. * Experimentation says that BCS works despite DERRMR masking all
  9971. * flip-done completion events and that unmasking all planes at once
  9972. * for the RCS also doesn't appear to drop events. Setting the DERRMR
  9973. * to zero does lead to lockups within MI_DISPLAY_FLIP.
  9974. */
  9975. if (req->engine->id == RCS) {
  9976. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  9977. intel_ring_emit_reg(ring, DERRMR);
  9978. intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
  9979. DERRMR_PIPEB_PRI_FLIP_DONE |
  9980. DERRMR_PIPEC_PRI_FLIP_DONE));
  9981. if (IS_GEN8(dev_priv))
  9982. intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
  9983. MI_SRM_LRM_GLOBAL_GTT);
  9984. else
  9985. intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
  9986. MI_SRM_LRM_GLOBAL_GTT);
  9987. intel_ring_emit_reg(ring, DERRMR);
  9988. intel_ring_emit(ring,
  9989. i915_ggtt_offset(req->engine->scratch) + 256);
  9990. if (IS_GEN8(dev_priv)) {
  9991. intel_ring_emit(ring, 0);
  9992. intel_ring_emit(ring, MI_NOOP);
  9993. }
  9994. }
  9995. intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
  9996. intel_ring_emit(ring, fb->pitches[0] |
  9997. intel_fb_modifier_to_tiling(fb->modifier[0]));
  9998. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9999. intel_ring_emit(ring, (MI_NOOP));
  10000. return 0;
  10001. }
  10002. static bool use_mmio_flip(struct intel_engine_cs *engine,
  10003. struct drm_i915_gem_object *obj)
  10004. {
  10005. /*
  10006. * This is not being used for older platforms, because
  10007. * non-availability of flip done interrupt forces us to use
  10008. * CS flips. Older platforms derive flip done using some clever
  10009. * tricks involving the flip_pending status bits and vblank irqs.
  10010. * So using MMIO flips there would disrupt this mechanism.
  10011. */
  10012. if (engine == NULL)
  10013. return true;
  10014. if (INTEL_GEN(engine->i915) < 5)
  10015. return false;
  10016. if (i915.use_mmio_flip < 0)
  10017. return false;
  10018. else if (i915.use_mmio_flip > 0)
  10019. return true;
  10020. else if (i915.enable_execlists)
  10021. return true;
  10022. return engine != i915_gem_object_last_write_engine(obj);
  10023. }
  10024. static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
  10025. unsigned int rotation,
  10026. struct intel_flip_work *work)
  10027. {
  10028. struct drm_device *dev = intel_crtc->base.dev;
  10029. struct drm_i915_private *dev_priv = to_i915(dev);
  10030. struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
  10031. const enum pipe pipe = intel_crtc->pipe;
  10032. u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
  10033. ctl = I915_READ(PLANE_CTL(pipe, 0));
  10034. ctl &= ~PLANE_CTL_TILED_MASK;
  10035. switch (fb->modifier[0]) {
  10036. case DRM_FORMAT_MOD_NONE:
  10037. break;
  10038. case I915_FORMAT_MOD_X_TILED:
  10039. ctl |= PLANE_CTL_TILED_X;
  10040. break;
  10041. case I915_FORMAT_MOD_Y_TILED:
  10042. ctl |= PLANE_CTL_TILED_Y;
  10043. break;
  10044. case I915_FORMAT_MOD_Yf_TILED:
  10045. ctl |= PLANE_CTL_TILED_YF;
  10046. break;
  10047. default:
  10048. MISSING_CASE(fb->modifier[0]);
  10049. }
  10050. /*
10051. * Both PLANE_CTL and PLANE_STRIDE are latched not on vblank but on
10052. * PLANE_SURF writes, so the update is guaranteed to be atomic.
  10053. */
  10054. I915_WRITE(PLANE_CTL(pipe, 0), ctl);
  10055. I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
  10056. I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
  10057. POSTING_READ(PLANE_SURF(pipe, 0));
  10058. }
  10059. static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
  10060. struct intel_flip_work *work)
  10061. {
  10062. struct drm_device *dev = intel_crtc->base.dev;
  10063. struct drm_i915_private *dev_priv = to_i915(dev);
  10064. struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
  10065. i915_reg_t reg = DSPCNTR(intel_crtc->plane);
  10066. u32 dspcntr;
  10067. dspcntr = I915_READ(reg);
  10068. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  10069. dspcntr |= DISPPLANE_TILED;
  10070. else
  10071. dspcntr &= ~DISPPLANE_TILED;
  10072. I915_WRITE(reg, dspcntr);
  10073. I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
  10074. POSTING_READ(DSPSURF(intel_crtc->plane));
  10075. }
  10076. static void intel_mmio_flip_work_func(struct work_struct *w)
  10077. {
  10078. struct intel_flip_work *work =
  10079. container_of(w, struct intel_flip_work, mmio_work);
  10080. struct intel_crtc *crtc = to_intel_crtc(work->crtc);
  10081. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  10082. struct intel_framebuffer *intel_fb =
  10083. to_intel_framebuffer(crtc->base.primary->fb);
  10084. struct drm_i915_gem_object *obj = intel_fb->obj;
  10085. WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
  10086. intel_pipe_update_start(crtc);
  10087. if (INTEL_GEN(dev_priv) >= 9)
  10088. skl_do_mmio_flip(crtc, work->rotation, work);
  10089. else
10090. /* use_mmio_flip() restricts MMIO flips to ilk+ */
  10091. ilk_do_mmio_flip(crtc, work);
  10092. intel_pipe_update_end(crtc, work);
  10093. }
  10094. static int intel_default_queue_flip(struct drm_device *dev,
  10095. struct drm_crtc *crtc,
  10096. struct drm_framebuffer *fb,
  10097. struct drm_i915_gem_object *obj,
  10098. struct drm_i915_gem_request *req,
  10099. uint32_t flags)
  10100. {
  10101. return -ENODEV;
  10102. }
  10103. static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
  10104. struct intel_crtc *intel_crtc,
  10105. struct intel_flip_work *work)
  10106. {
  10107. u32 addr, vblank;
  10108. if (!atomic_read(&work->pending))
  10109. return false;
  10110. smp_rmb();
  10111. vblank = intel_crtc_get_vblank_counter(intel_crtc);
  10112. if (work->flip_ready_vblank == 0) {
  10113. if (work->flip_queued_req &&
  10114. !i915_gem_request_completed(work->flip_queued_req))
  10115. return false;
  10116. work->flip_ready_vblank = vblank;
  10117. }
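/*
 * Allow the flip a few vblanks after it became ready before
 * considering it potentially stuck.
 */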
  10118. if (vblank - work->flip_ready_vblank < 3)
  10119. return false;
  10120. /* Potential stall - if we see that the flip has happened,
  10121. * assume a missed interrupt. */
  10122. if (INTEL_GEN(dev_priv) >= 4)
  10123. addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
  10124. else
  10125. addr = I915_READ(DSPADDR(intel_crtc->plane));
  10126. /* There is a potential issue here with a false positive after a flip
  10127. * to the same address. We could address this by checking for a
  10128. * non-incrementing frame counter.
  10129. */
  10130. return addr == work->gtt_offset;
  10131. }
  10132. void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
  10133. {
  10134. struct drm_device *dev = &dev_priv->drm;
  10135. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  10136. struct intel_flip_work *work;
  10137. WARN_ON(!in_interrupt());
  10138. if (crtc == NULL)
  10139. return;
  10140. spin_lock(&dev->event_lock);
  10141. work = crtc->flip_work;
  10142. if (work != NULL && !is_mmio_work(work) &&
  10143. __pageflip_stall_check_cs(dev_priv, crtc, work)) {
  10144. WARN_ONCE(1,
  10145. "Kicking stuck page flip: queued at %d, now %d\n",
  10146. work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
  10147. page_flip_completed(crtc);
  10148. work = NULL;
  10149. }
  10150. if (work != NULL && !is_mmio_work(work) &&
  10151. intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
  10152. intel_queue_rps_boost_for_request(work->flip_queued_req);
  10153. spin_unlock(&dev->event_lock);
  10154. }
  10155. static int intel_crtc_page_flip(struct drm_crtc *crtc,
  10156. struct drm_framebuffer *fb,
  10157. struct drm_pending_vblank_event *event,
  10158. uint32_t page_flip_flags)
  10159. {
  10160. struct drm_device *dev = crtc->dev;
  10161. struct drm_i915_private *dev_priv = to_i915(dev);
  10162. struct drm_framebuffer *old_fb = crtc->primary->fb;
  10163. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  10164. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10165. struct drm_plane *primary = crtc->primary;
  10166. enum pipe pipe = intel_crtc->pipe;
  10167. struct intel_flip_work *work;
  10168. struct intel_engine_cs *engine;
  10169. bool mmio_flip;
  10170. struct drm_i915_gem_request *request;
  10171. struct i915_vma *vma;
  10172. int ret;
  10173. /*
  10174. * drm_mode_page_flip_ioctl() should already catch this, but double
  10175. * check to be safe. In the future we may enable pageflipping from
  10176. * a disabled primary plane.
  10177. */
  10178. if (WARN_ON(intel_fb_obj(old_fb) == NULL))
  10179. return -EBUSY;
  10180. /* Can't change pixel format via MI display flips. */
  10181. if (fb->pixel_format != crtc->primary->fb->pixel_format)
  10182. return -EINVAL;
  10183. /*
  10184. * TILEOFF/LINOFF registers can't be changed via MI display flips.
10185. * Note that pitch changes could also affect these registers.
  10186. */
  10187. if (INTEL_GEN(dev_priv) > 3 &&
  10188. (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
  10189. fb->pitches[0] != crtc->primary->fb->pitches[0]))
  10190. return -EINVAL;
  10191. if (i915_terminally_wedged(&dev_priv->gpu_error))
  10192. goto out_hang;
  10193. work = kzalloc(sizeof(*work), GFP_KERNEL);
  10194. if (work == NULL)
  10195. return -ENOMEM;
  10196. work->event = event;
  10197. work->crtc = crtc;
  10198. work->old_fb = old_fb;
  10199. INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
  10200. ret = drm_crtc_vblank_get(crtc);
  10201. if (ret)
  10202. goto free_work;
  10203. /* We borrow the event spin lock for protecting flip_work */
  10204. spin_lock_irq(&dev->event_lock);
  10205. if (intel_crtc->flip_work) {
  10206. /* Before declaring the flip queue wedged, check if
  10207. * the hardware completed the operation behind our backs.
  10208. */
  10209. if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
  10210. DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
  10211. page_flip_completed(intel_crtc);
  10212. } else {
  10213. DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
  10214. spin_unlock_irq(&dev->event_lock);
  10215. drm_crtc_vblank_put(crtc);
  10216. kfree(work);
  10217. return -EBUSY;
  10218. }
  10219. }
  10220. intel_crtc->flip_work = work;
  10221. spin_unlock_irq(&dev->event_lock);
  10222. if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
  10223. flush_workqueue(dev_priv->wq);
  10224. /* Reference the objects for the scheduled work. */
  10225. drm_framebuffer_reference(work->old_fb);
  10226. crtc->primary->fb = fb;
  10227. update_state_fb(crtc->primary);
  10228. work->pending_flip_obj = i915_gem_object_get(obj);
  10229. ret = i915_mutex_lock_interruptible(dev);
  10230. if (ret)
  10231. goto cleanup;
  10232. intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
  10233. if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
  10234. ret = -EIO;
  10235. goto unlock;
  10236. }
  10237. atomic_inc(&intel_crtc->unpin_work_count);
  10238. if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
  10239. work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
  10240. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  10241. engine = dev_priv->engine[BCS];
  10242. if (fb->modifier[0] != old_fb->modifier[0])
  10243. /* vlv: DISPLAY_FLIP fails to change tiling */
  10244. engine = NULL;
  10245. } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
  10246. engine = dev_priv->engine[BCS];
  10247. } else if (INTEL_GEN(dev_priv) >= 7) {
  10248. engine = i915_gem_object_last_write_engine(obj);
  10249. if (engine == NULL || engine->id != RCS)
  10250. engine = dev_priv->engine[BCS];
  10251. } else {
  10252. engine = dev_priv->engine[RCS];
  10253. }
  10254. mmio_flip = use_mmio_flip(engine, obj);
  10255. vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
  10256. if (IS_ERR(vma)) {
  10257. ret = PTR_ERR(vma);
  10258. goto cleanup_pending;
  10259. }
  10260. work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
  10261. work->gtt_offset += intel_crtc->dspaddr_offset;
  10262. work->rotation = crtc->primary->state->rotation;
  10263. /*
  10264. * There's the potential that the next frame will not be compatible with
  10265. * FBC, so we want to call pre_update() before the actual page flip.
  10266. * The problem is that pre_update() caches some information about the fb
  10267. * object, so we want to do this only after the object is pinned. Let's
  10268. * be on the safe side and do this immediately before scheduling the
  10269. * flip.
  10270. */
  10271. intel_fbc_pre_update(intel_crtc, intel_crtc->config,
  10272. to_intel_plane_state(primary->state));
  10273. if (mmio_flip) {
  10274. INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
  10275. queue_work(system_unbound_wq, &work->mmio_work);
  10276. } else {
  10277. request = i915_gem_request_alloc(engine, engine->last_context);
  10278. if (IS_ERR(request)) {
  10279. ret = PTR_ERR(request);
  10280. goto cleanup_unpin;
  10281. }
  10282. ret = i915_gem_request_await_object(request, obj, false);
  10283. if (ret)
  10284. goto cleanup_request;
  10285. ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
  10286. page_flip_flags);
  10287. if (ret)
  10288. goto cleanup_request;
  10289. intel_mark_page_flip_active(intel_crtc, work);
  10290. work->flip_queued_req = i915_gem_request_get(request);
  10291. i915_add_request_no_flush(request);
  10292. }
  10293. i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
  10294. i915_gem_track_fb(intel_fb_obj(old_fb), obj,
  10295. to_intel_plane(primary)->frontbuffer_bit);
  10296. mutex_unlock(&dev->struct_mutex);
  10297. intel_frontbuffer_flip_prepare(to_i915(dev),
  10298. to_intel_plane(primary)->frontbuffer_bit);
  10299. trace_i915_flip_request(intel_crtc->plane, obj);
  10300. return 0;
  10301. cleanup_request:
  10302. i915_add_request_no_flush(request);
  10303. cleanup_unpin:
  10304. intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
  10305. cleanup_pending:
  10306. atomic_dec(&intel_crtc->unpin_work_count);
  10307. unlock:
  10308. mutex_unlock(&dev->struct_mutex);
  10309. cleanup:
  10310. crtc->primary->fb = old_fb;
  10311. update_state_fb(crtc->primary);
  10312. i915_gem_object_put(obj);
  10313. drm_framebuffer_unreference(work->old_fb);
  10314. spin_lock_irq(&dev->event_lock);
  10315. intel_crtc->flip_work = NULL;
  10316. spin_unlock_irq(&dev->event_lock);
  10317. drm_crtc_vblank_put(crtc);
  10318. free_work:
  10319. kfree(work);
  10320. if (ret == -EIO) {
  10321. struct drm_atomic_state *state;
  10322. struct drm_plane_state *plane_state;
  10323. out_hang:
  10324. state = drm_atomic_state_alloc(dev);
  10325. if (!state)
  10326. return -ENOMEM;
  10327. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  10328. retry:
  10329. plane_state = drm_atomic_get_plane_state(state, primary);
  10330. ret = PTR_ERR_OR_ZERO(plane_state);
  10331. if (!ret) {
  10332. drm_atomic_set_fb_for_plane(plane_state, fb);
  10333. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  10334. if (!ret)
  10335. ret = drm_atomic_commit(state);
  10336. }
  10337. if (ret == -EDEADLK) {
  10338. drm_modeset_backoff(state->acquire_ctx);
  10339. drm_atomic_state_clear(state);
  10340. goto retry;
  10341. }
  10342. drm_atomic_state_put(state);
  10343. if (ret == 0 && event) {
  10344. spin_lock_irq(&dev->event_lock);
  10345. drm_crtc_send_vblank_event(crtc, event);
  10346. spin_unlock_irq(&dev->event_lock);
  10347. }
  10348. }
  10349. return ret;
  10350. }
  10351. /**
  10352. * intel_wm_need_update - Check whether watermarks need updating
  10353. * @plane: drm plane
  10354. * @state: new plane state
  10355. *
  10356. * Check current plane state versus the new one to determine whether
  10357. * watermarks need to be recalculated.
  10358. *
  10359. * Returns true or false.
  10360. */
  10361. static bool intel_wm_need_update(struct drm_plane *plane,
  10362. struct drm_plane_state *state)
  10363. {
  10364. struct intel_plane_state *new = to_intel_plane_state(state);
  10365. struct intel_plane_state *cur = to_intel_plane_state(plane->state);
  10366. /* Update watermarks on tiling or size changes. */
  10367. if (new->base.visible != cur->base.visible)
  10368. return true;
  10369. if (!cur->base.fb || !new->base.fb)
  10370. return false;
  10371. if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
  10372. cur->base.rotation != new->base.rotation ||
  10373. drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
  10374. drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
  10375. drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
  10376. drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
  10377. return true;
  10378. return false;
  10379. }
  10380. static bool needs_scaling(struct intel_plane_state *state)
  10381. {
  10382. int src_w = drm_rect_width(&state->base.src) >> 16;
  10383. int src_h = drm_rect_height(&state->base.src) >> 16;
  10384. int dst_w = drm_rect_width(&state->base.dst);
  10385. int dst_h = drm_rect_height(&state->base.dst);
  10386. return (src_w != dst_w || src_h != dst_h);
  10387. }
  10388. int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
  10389. struct drm_plane_state *plane_state)
  10390. {
  10391. struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
  10392. struct drm_crtc *crtc = crtc_state->crtc;
  10393. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10394. struct drm_plane *plane = plane_state->plane;
  10395. struct drm_device *dev = crtc->dev;
  10396. struct drm_i915_private *dev_priv = to_i915(dev);
  10397. struct intel_plane_state *old_plane_state =
  10398. to_intel_plane_state(plane->state);
  10399. bool mode_changed = needs_modeset(crtc_state);
  10400. bool was_crtc_enabled = crtc->state->active;
  10401. bool is_crtc_enabled = crtc_state->active;
  10402. bool turn_off, turn_on, visible, was_visible;
  10403. struct drm_framebuffer *fb = plane_state->fb;
  10404. int ret;
  10405. if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
  10406. ret = skl_update_scaler_plane(
  10407. to_intel_crtc_state(crtc_state),
  10408. to_intel_plane_state(plane_state));
  10409. if (ret)
  10410. return ret;
  10411. }
  10412. was_visible = old_plane_state->base.visible;
  10413. visible = to_intel_plane_state(plane_state)->base.visible;
  10414. if (!was_crtc_enabled && WARN_ON(was_visible))
  10415. was_visible = false;
  10416. /*
  10417. * Visibility is calculated as if the crtc was on, but
  10418. * after scaler setup everything depends on it being off
  10419. * when the crtc isn't active.
  10420. *
  10421. * FIXME this is wrong for watermarks. Watermarks should also
  10422. * be computed as if the pipe would be active. Perhaps move
  10423. * per-plane wm computation to the .check_plane() hook, and
  10424. * only combine the results from all planes in the current place?
  10425. */
  10426. if (!is_crtc_enabled)
  10427. to_intel_plane_state(plane_state)->base.visible = visible = false;
  10428. if (!was_visible && !visible)
  10429. return 0;
  10430. if (fb != old_plane_state->base.fb)
  10431. pipe_config->fb_changed = true;
  10432. turn_off = was_visible && (!visible || mode_changed);
  10433. turn_on = visible && (!was_visible || mode_changed);
  10434. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
  10435. intel_crtc->base.base.id,
  10436. intel_crtc->base.name,
  10437. plane->base.id, plane->name,
  10438. fb ? fb->base.id : -1);
  10439. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
  10440. plane->base.id, plane->name,
  10441. was_visible, visible,
  10442. turn_off, turn_on, mode_changed);
  10443. if (turn_on) {
  10444. pipe_config->update_wm_pre = true;
  10445. /* must disable cxsr around plane enable/disable */
  10446. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  10447. pipe_config->disable_cxsr = true;
  10448. } else if (turn_off) {
  10449. pipe_config->update_wm_post = true;
  10450. /* must disable cxsr around plane enable/disable */
  10451. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  10452. pipe_config->disable_cxsr = true;
  10453. } else if (intel_wm_need_update(plane, plane_state)) {
  10454. /* FIXME bollocks */
  10455. pipe_config->update_wm_pre = true;
  10456. pipe_config->update_wm_post = true;
  10457. }
  10458. /* Pre-gen9 platforms need two-step watermark updates */
  10459. if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
  10460. INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
  10461. to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
  10462. if (visible || was_visible)
  10463. pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
  10464. /*
  10465. * WaCxSRDisabledForSpriteScaling:ivb
  10466. *
  10467. * cstate->update_wm was already set above, so this flag will
  10468. * take effect when we commit and program watermarks.
  10469. */
  10470. if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
  10471. needs_scaling(to_intel_plane_state(plane_state)) &&
  10472. !needs_scaling(old_plane_state))
  10473. pipe_config->disable_lp_wm = true;
  10474. return 0;
  10475. }
  10476. static bool encoders_cloneable(const struct intel_encoder *a,
  10477. const struct intel_encoder *b)
  10478. {
  10479. /* masks could be asymmetric, so check both ways */
  10480. return a == b || (a->cloneable & (1 << b->type) &&
  10481. b->cloneable & (1 << a->type));
  10482. }
  10483. static bool check_single_encoder_cloning(struct drm_atomic_state *state,
  10484. struct intel_crtc *crtc,
  10485. struct intel_encoder *encoder)
  10486. {
  10487. struct intel_encoder *source_encoder;
  10488. struct drm_connector *connector;
  10489. struct drm_connector_state *connector_state;
  10490. int i;
  10491. for_each_connector_in_state(state, connector, connector_state, i) {
  10492. if (connector_state->crtc != &crtc->base)
  10493. continue;
  10494. source_encoder =
  10495. to_intel_encoder(connector_state->best_encoder);
  10496. if (!encoders_cloneable(encoder, source_encoder))
  10497. return false;
  10498. }
  10499. return true;
  10500. }
  10501. static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  10502. struct drm_crtc_state *crtc_state)
  10503. {
  10504. struct drm_device *dev = crtc->dev;
  10505. struct drm_i915_private *dev_priv = to_i915(dev);
  10506. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10507. struct intel_crtc_state *pipe_config =
  10508. to_intel_crtc_state(crtc_state);
  10509. struct drm_atomic_state *state = crtc_state->state;
  10510. int ret;
  10511. bool mode_changed = needs_modeset(crtc_state);
  10512. if (mode_changed && !crtc_state->active)
  10513. pipe_config->update_wm_post = true;
  10514. if (mode_changed && crtc_state->enable &&
  10515. dev_priv->display.crtc_compute_clock &&
  10516. !WARN_ON(pipe_config->shared_dpll)) {
  10517. ret = dev_priv->display.crtc_compute_clock(intel_crtc,
  10518. pipe_config);
  10519. if (ret)
  10520. return ret;
  10521. }
  10522. if (crtc_state->color_mgmt_changed) {
  10523. ret = intel_color_check(crtc, crtc_state);
  10524. if (ret)
  10525. return ret;
  10526. /*
  10527. * Changing color management on Intel hardware is
  10528. * handled as part of planes update.
  10529. */
  10530. crtc_state->planes_changed = true;
  10531. }
  10532. ret = 0;
  10533. if (dev_priv->display.compute_pipe_wm) {
  10534. ret = dev_priv->display.compute_pipe_wm(pipe_config);
  10535. if (ret) {
  10536. DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
  10537. return ret;
  10538. }
  10539. }
  10540. if (dev_priv->display.compute_intermediate_wm &&
  10541. !to_intel_atomic_state(state)->skip_intermediate_wm) {
  10542. if (WARN_ON(!dev_priv->display.compute_pipe_wm))
  10543. return 0;
  10544. /*
  10545. * Calculate 'intermediate' watermarks that satisfy both the
  10546. * old state and the new state. We can program these
  10547. * immediately.
  10548. */
  10549. ret = dev_priv->display.compute_intermediate_wm(dev,
  10550. intel_crtc,
  10551. pipe_config);
  10552. if (ret) {
  10553. DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
  10554. return ret;
  10555. }
  10556. } else if (dev_priv->display.compute_intermediate_wm) {
  10557. if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
  10558. pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
  10559. }
  10560. if (INTEL_GEN(dev_priv) >= 9) {
  10561. if (mode_changed)
  10562. ret = skl_update_scaler_crtc(pipe_config);
  10563. if (!ret)
  10564. ret = intel_atomic_setup_scalers(dev, intel_crtc,
  10565. pipe_config);
  10566. }
  10567. return ret;
  10568. }
  10569. static const struct drm_crtc_helper_funcs intel_helper_funcs = {
  10570. .mode_set_base_atomic = intel_pipe_set_base_atomic,
  10571. .atomic_begin = intel_begin_crtc_commit,
  10572. .atomic_flush = intel_finish_crtc_commit,
  10573. .atomic_check = intel_crtc_atomic_check,
  10574. };
  10575. static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
  10576. {
  10577. struct intel_connector *connector;
  10578. for_each_intel_connector(dev, connector) {
  10579. if (connector->base.state->crtc)
  10580. drm_connector_unreference(&connector->base);
  10581. if (connector->base.encoder) {
  10582. connector->base.state->best_encoder =
  10583. connector->base.encoder;
  10584. connector->base.state->crtc =
  10585. connector->base.encoder->crtc;
  10586. drm_connector_reference(&connector->base);
  10587. } else {
  10588. connector->base.state->best_encoder = NULL;
  10589. connector->base.state->crtc = NULL;
  10590. }
  10591. }
  10592. }
  10593. static void
  10594. connected_sink_compute_bpp(struct intel_connector *connector,
  10595. struct intel_crtc_state *pipe_config)
  10596. {
  10597. const struct drm_display_info *info = &connector->base.display_info;
  10598. int bpp = pipe_config->pipe_bpp;
  10599. DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
  10600. connector->base.base.id,
  10601. connector->base.name);
  10602. /* Don't use an invalid EDID bpc value */
  10603. if (info->bpc != 0 && info->bpc * 3 < bpp) {
  10604. DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
  10605. bpp, info->bpc * 3);
  10606. pipe_config->pipe_bpp = info->bpc * 3;
  10607. }
  10608. /* Clamp bpp to 8 on screens without EDID 1.4 */
  10609. if (info->bpc == 0 && bpp > 24) {
  10610. DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
  10611. bpp);
  10612. pipe_config->pipe_bpp = 24;
  10613. }
  10614. }
  10615. static int
  10616. compute_baseline_pipe_bpp(struct intel_crtc *crtc,
  10617. struct intel_crtc_state *pipe_config)
  10618. {
  10619. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  10620. struct drm_atomic_state *state;
  10621. struct drm_connector *connector;
  10622. struct drm_connector_state *connector_state;
  10623. int bpp, i;
  10624. if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  10625. IS_CHERRYVIEW(dev_priv)))
  10626. bpp = 10*3;
  10627. else if (INTEL_GEN(dev_priv) >= 5)
  10628. bpp = 12*3;
  10629. else
  10630. bpp = 8*3;
  10631. pipe_config->pipe_bpp = bpp;
  10632. state = pipe_config->base.state;
  10633. /* Clamp display bpp to EDID value */
  10634. for_each_connector_in_state(state, connector, connector_state, i) {
  10635. if (connector_state->crtc != &crtc->base)
  10636. continue;
  10637. connected_sink_compute_bpp(to_intel_connector(connector),
  10638. pipe_config);
  10639. }
  10640. return bpp;
  10641. }
  10642. static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
  10643. {
  10644. DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
  10645. "type: 0x%x flags: 0x%x\n",
  10646. mode->crtc_clock,
  10647. mode->crtc_hdisplay, mode->crtc_hsync_start,
  10648. mode->crtc_hsync_end, mode->crtc_htotal,
  10649. mode->crtc_vdisplay, mode->crtc_vsync_start,
  10650. mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
  10651. }
  10652. static inline void
  10653. intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
  10654. unsigned int lane_count, struct intel_link_m_n *m_n)
  10655. {
  10656. DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
  10657. id, lane_count,
  10658. m_n->gmch_m, m_n->gmch_n,
  10659. m_n->link_m, m_n->link_n, m_n->tu);
  10660. }
  10661. static void intel_dump_pipe_config(struct intel_crtc *crtc,
  10662. struct intel_crtc_state *pipe_config,
  10663. const char *context)
  10664. {
  10665. struct drm_device *dev = crtc->base.dev;
  10666. struct drm_i915_private *dev_priv = to_i915(dev);
  10667. struct drm_plane *plane;
  10668. struct intel_plane *intel_plane;
  10669. struct intel_plane_state *state;
  10670. struct drm_framebuffer *fb;
  10671. DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
  10672. crtc->base.base.id, crtc->base.name, context);
  10673. DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
  10674. transcoder_name(pipe_config->cpu_transcoder),
  10675. pipe_config->pipe_bpp, pipe_config->dither);
  10676. if (pipe_config->has_pch_encoder)
  10677. intel_dump_m_n_config(pipe_config, "fdi",
  10678. pipe_config->fdi_lanes,
  10679. &pipe_config->fdi_m_n);
  10680. if (intel_crtc_has_dp_encoder(pipe_config)) {
  10681. intel_dump_m_n_config(pipe_config, "dp m_n",
  10682. pipe_config->lane_count, &pipe_config->dp_m_n);
  10683. if (pipe_config->has_drrs)
  10684. intel_dump_m_n_config(pipe_config, "dp m2_n2",
  10685. pipe_config->lane_count,
  10686. &pipe_config->dp_m2_n2);
  10687. }
  10688. DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
  10689. pipe_config->has_audio, pipe_config->has_infoframe);
  10690. DRM_DEBUG_KMS("requested mode:\n");
  10691. drm_mode_debug_printmodeline(&pipe_config->base.mode);
  10692. DRM_DEBUG_KMS("adjusted mode:\n");
  10693. drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
  10694. intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
  10695. DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
  10696. pipe_config->port_clock,
  10697. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  10698. if (INTEL_GEN(dev_priv) >= 9)
  10699. DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
  10700. crtc->num_scalers,
  10701. pipe_config->scaler_state.scaler_users,
  10702. pipe_config->scaler_state.scaler_id);
  10703. if (HAS_GMCH_DISPLAY(dev_priv))
  10704. DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
  10705. pipe_config->gmch_pfit.control,
  10706. pipe_config->gmch_pfit.pgm_ratios,
  10707. pipe_config->gmch_pfit.lvds_border_bits);
  10708. else
  10709. DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
  10710. pipe_config->pch_pfit.pos,
  10711. pipe_config->pch_pfit.size,
  10712. enableddisabled(pipe_config->pch_pfit.enabled));
  10713. DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
  10714. pipe_config->ips_enabled, pipe_config->double_wide);
  10715. if (IS_BROXTON(dev_priv)) {
  10716. DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
  10717. "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
  10718. "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
  10719. pipe_config->dpll_hw_state.ebb0,
  10720. pipe_config->dpll_hw_state.ebb4,
  10721. pipe_config->dpll_hw_state.pll0,
  10722. pipe_config->dpll_hw_state.pll1,
  10723. pipe_config->dpll_hw_state.pll2,
  10724. pipe_config->dpll_hw_state.pll3,
  10725. pipe_config->dpll_hw_state.pll6,
  10726. pipe_config->dpll_hw_state.pll8,
  10727. pipe_config->dpll_hw_state.pll9,
  10728. pipe_config->dpll_hw_state.pll10,
  10729. pipe_config->dpll_hw_state.pcsdw12);
  10730. } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
  10731. DRM_DEBUG_KMS("dpll_hw_state: "
  10732. "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
  10733. pipe_config->dpll_hw_state.ctrl1,
  10734. pipe_config->dpll_hw_state.cfgcr1,
  10735. pipe_config->dpll_hw_state.cfgcr2);
  10736. } else if (HAS_DDI(dev_priv)) {
  10737. DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
  10738. pipe_config->dpll_hw_state.wrpll,
  10739. pipe_config->dpll_hw_state.spll);
  10740. } else {
  10741. DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
  10742. "fp0: 0x%x, fp1: 0x%x\n",
  10743. pipe_config->dpll_hw_state.dpll,
  10744. pipe_config->dpll_hw_state.dpll_md,
  10745. pipe_config->dpll_hw_state.fp0,
  10746. pipe_config->dpll_hw_state.fp1);
  10747. }
  10748. DRM_DEBUG_KMS("planes on this crtc\n");
  10749. list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
  10750. struct drm_format_name_buf format_name;
  10751. intel_plane = to_intel_plane(plane);
  10752. if (intel_plane->pipe != crtc->pipe)
  10753. continue;
  10754. state = to_intel_plane_state(plane->state);
  10755. fb = state->base.fb;
  10756. if (!fb) {
  10757. DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
  10758. plane->base.id, plane->name, state->scaler_id);
  10759. continue;
  10760. }
  10761. DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
  10762. plane->base.id, plane->name,
  10763. fb->base.id, fb->width, fb->height,
  10764. drm_get_format_name(fb->pixel_format, &format_name));
  10765. if (INTEL_GEN(dev_priv) >= 9)
  10766. DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
  10767. state->scaler_id,
  10768. state->base.src.x1 >> 16,
  10769. state->base.src.y1 >> 16,
  10770. drm_rect_width(&state->base.src) >> 16,
  10771. drm_rect_height(&state->base.src) >> 16,
  10772. state->base.dst.x1, state->base.dst.y1,
  10773. drm_rect_width(&state->base.dst),
  10774. drm_rect_height(&state->base.dst));
  10775. }
  10776. }
  10777. static bool check_digital_port_conflicts(struct drm_atomic_state *state)
  10778. {
  10779. struct drm_device *dev = state->dev;
  10780. struct drm_connector *connector;
  10781. unsigned int used_ports = 0;
  10782. unsigned int used_mst_ports = 0;
  10783. /*
  10784. * Walk the connector list instead of the encoder
  10785. * list to detect the problem on ddi platforms
  10786. * where there's just one encoder per digital port.
  10787. */
  10788. drm_for_each_connector(connector, dev) {
  10789. struct drm_connector_state *connector_state;
  10790. struct intel_encoder *encoder;
  10791. connector_state = drm_atomic_get_existing_connector_state(state, connector);
  10792. if (!connector_state)
  10793. connector_state = connector->state;
  10794. if (!connector_state->best_encoder)
  10795. continue;
  10796. encoder = to_intel_encoder(connector_state->best_encoder);
  10797. WARN_ON(!connector_state->crtc);
  10798. switch (encoder->type) {
  10799. unsigned int port_mask;
  10800. case INTEL_OUTPUT_UNKNOWN:
  10801. if (WARN_ON(!HAS_DDI(to_i915(dev))))
  10802. break;
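/* on DDI platforms fall through and treat it as a digital port */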
  10803. case INTEL_OUTPUT_DP:
  10804. case INTEL_OUTPUT_HDMI:
  10805. case INTEL_OUTPUT_EDP:
  10806. port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
  10807. /* the same port mustn't appear more than once */
  10808. if (used_ports & port_mask)
  10809. return false;
  10810. used_ports |= port_mask;
  10811. break;
  10812. case INTEL_OUTPUT_DP_MST:
  10813. used_mst_ports |=
  10814. 1 << enc_to_mst(&encoder->base)->primary->port;
  10815. break;
  10816. default:
  10817. break;
  10818. }
  10819. }
  10820. /* can't mix MST and SST/HDMI on the same port */
  10821. if (used_ports & used_mst_ports)
  10822. return false;
  10823. return true;
  10824. }
  10825. static void
  10826. clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  10827. {
  10828. struct drm_crtc_state tmp_state;
  10829. struct intel_crtc_scaler_state scaler_state;
  10830. struct intel_dpll_hw_state dpll_hw_state;
  10831. struct intel_shared_dpll *shared_dpll;
  10832. bool force_thru;
  10833. /* FIXME: before the switch to atomic started, a new pipe_config was
  10834. * kzalloc'd. Code that depends on any field being zero should be
  10835. * fixed, so that the crtc_state can be safely duplicated. For now,
10836. * only fields that are known not to cause problems are preserved. */
  10837. tmp_state = crtc_state->base;
  10838. scaler_state = crtc_state->scaler_state;
  10839. shared_dpll = crtc_state->shared_dpll;
  10840. dpll_hw_state = crtc_state->dpll_hw_state;
  10841. force_thru = crtc_state->pch_pfit.force_thru;
  10842. memset(crtc_state, 0, sizeof *crtc_state);
  10843. crtc_state->base = tmp_state;
  10844. crtc_state->scaler_state = scaler_state;
  10845. crtc_state->shared_dpll = shared_dpll;
  10846. crtc_state->dpll_hw_state = dpll_hw_state;
  10847. crtc_state->pch_pfit.force_thru = force_thru;
  10848. }
  10849. static int
  10850. intel_modeset_pipe_config(struct drm_crtc *crtc,
  10851. struct intel_crtc_state *pipe_config)
  10852. {
  10853. struct drm_atomic_state *state = pipe_config->base.state;
  10854. struct intel_encoder *encoder;
  10855. struct drm_connector *connector;
  10856. struct drm_connector_state *connector_state;
  10857. int base_bpp, ret = -EINVAL;
  10858. int i;
  10859. bool retry = true;
  10860. clear_intel_crtc_state(pipe_config);
  10861. pipe_config->cpu_transcoder =
  10862. (enum transcoder) to_intel_crtc(crtc)->pipe;
  10863. /*
  10864. * Sanitize sync polarity flags based on requested ones. If neither
10865. * positive nor negative polarity is requested, treat this as meaning
  10866. * negative polarity.
  10867. */
  10868. if (!(pipe_config->base.adjusted_mode.flags &
  10869. (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
  10870. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
  10871. if (!(pipe_config->base.adjusted_mode.flags &
  10872. (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
  10873. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
  10874. base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
  10875. pipe_config);
  10876. if (base_bpp < 0)
  10877. goto fail;
  10878. /*
  10879. * Determine the real pipe dimensions. Note that stereo modes can
  10880. * increase the actual pipe size due to the frame doubling and
10881. * insertion of additional space for blanks between the frames. This
  10882. * is stored in the crtc timings. We use the requested mode to do this
  10883. * computation to clearly distinguish it from the adjusted mode, which
  10884. * can be changed by the connectors in the below retry loop.
  10885. */
  10886. drm_crtc_get_hv_timing(&pipe_config->base.mode,
  10887. &pipe_config->pipe_src_w,
  10888. &pipe_config->pipe_src_h);
  10889. for_each_connector_in_state(state, connector, connector_state, i) {
  10890. if (connector_state->crtc != crtc)
  10891. continue;
  10892. encoder = to_intel_encoder(connector_state->best_encoder);
  10893. if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
  10894. DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
  10895. goto fail;
  10896. }
  10897. /*
  10898. * Determine output_types before calling the .compute_config()
  10899. * hooks so that the hooks can use this information safely.
  10900. */
  10901. pipe_config->output_types |= 1 << encoder->type;
  10902. }
  10903. encoder_retry:
  10904. /* Ensure the port clock defaults are reset when retrying. */
  10905. pipe_config->port_clock = 0;
  10906. pipe_config->pixel_multiplier = 1;
  10907. /* Fill in default crtc timings, allow encoders to overwrite them. */
  10908. drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
  10909. CRTC_STEREO_DOUBLE);
  10910. /* Pass our mode to the connectors and the CRTC to give them a chance to
  10911. * adjust it according to limitations or connector properties, and also
  10912. * a chance to reject the mode entirely.
  10913. */
  10914. for_each_connector_in_state(state, connector, connector_state, i) {
  10915. if (connector_state->crtc != crtc)
  10916. continue;
  10917. encoder = to_intel_encoder(connector_state->best_encoder);
  10918. if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
  10919. DRM_DEBUG_KMS("Encoder config failure\n");
  10920. goto fail;
  10921. }
  10922. }
  10923. /* Set default port clock if not overwritten by the encoder. Needs to be
  10924. * done afterwards in case the encoder adjusts the mode. */
  10925. if (!pipe_config->port_clock)
  10926. pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
  10927. * pipe_config->pixel_multiplier;
  10928. ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
  10929. if (ret < 0) {
  10930. DRM_DEBUG_KMS("CRTC fixup failed\n");
  10931. goto fail;
  10932. }
  10933. if (ret == RETRY) {
  10934. if (WARN(!retry, "loop in pipe configuration computation\n")) {
  10935. ret = -EINVAL;
  10936. goto fail;
  10937. }
  10938. DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
  10939. retry = false;
  10940. goto encoder_retry;
  10941. }
  10942. /* Dithering seems to not pass-through bits correctly when it should, so
  10943. * only enable it on 6bpc panels. */
  10944. pipe_config->dither = pipe_config->pipe_bpp == 6*3;
  10945. DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
  10946. base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  10947. fail:
  10948. return ret;
  10949. }
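/*
 * Summary of the retry flow above (an illustrative reading of the code, not a
 * statement from bspec): intel_crtc_compute_config() may ask for a single
 * retry by returning RETRY, typically because the configuration is bandwidth
 * constrained.  The port clock and pixel multiplier are then reset and the
 * encoder ->compute_config() hooks run again; a second RETRY is treated as a
 * loop in the computation and fails with -EINVAL.
 */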
  10950. static void
  10951. intel_modeset_update_crtc_state(struct drm_atomic_state *state)
  10952. {
  10953. struct drm_crtc *crtc;
  10954. struct drm_crtc_state *crtc_state;
  10955. int i;
  10956. /* Double check state. */
  10957. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  10958. to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
  10959. /* Update hwmode for vblank functions */
  10960. if (crtc->state->active)
  10961. crtc->hwmode = crtc->state->adjusted_mode;
  10962. else
  10963. crtc->hwmode.crtc_clock = 0;
  10964. /*
  10965. * Update legacy state to satisfy fbc code. This can
  10966. * be removed when fbc uses the atomic state.
  10967. */
  10968. if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
  10969. struct drm_plane_state *plane_state = crtc->primary->state;
  10970. crtc->primary->fb = plane_state->fb;
  10971. crtc->x = plane_state->src_x >> 16;
  10972. crtc->y = plane_state->src_y >> 16;
  10973. }
  10974. }
  10975. }
  10976. static bool intel_fuzzy_clock_check(int clock1, int clock2)
  10977. {
  10978. int diff;
  10979. if (clock1 == clock2)
  10980. return true;
  10981. if (!clock1 || !clock2)
  10982. return false;
  10983. diff = abs(clock1 - clock2);
  10984. if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
  10985. return true;
  10986. return false;
  10987. }
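/*
 * Worked example of the tolerance above (values chosen for illustration):
 * clock1 = 100000, clock2 = 104000 gives diff = 4000 and
 * (4000 + 204000) * 100 / 204000 = 101 with integer division, which is below
 * 105, so the clocks are treated as equal.  The check passes roughly when
 * diff is under 5% of (clock1 + clock2), i.e. within about 10% of either
 * clock when the two values are close.
 */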
  10988. static bool
  10989. intel_compare_m_n(unsigned int m, unsigned int n,
  10990. unsigned int m2, unsigned int n2,
  10991. bool exact)
  10992. {
  10993. if (m == m2 && n == n2)
  10994. return true;
  10995. if (exact || !m || !n || !m2 || !n2)
  10996. return false;
  10997. BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
  10998. if (n > n2) {
  10999. while (n > n2) {
  11000. m2 <<= 1;
  11001. n2 <<= 1;
  11002. }
  11003. } else if (n < n2) {
  11004. while (n < n2) {
  11005. m <<= 1;
  11006. n <<= 1;
  11007. }
  11008. }
  11009. if (n != n2)
  11010. return false;
  11011. return intel_fuzzy_clock_check(m, m2);
  11012. }
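/*
 * Illustrative example of the non-exact path above: comparing m/n = 10/20
 * against m2/n2 = 41/80, the pair with the smaller n is doubled until the
 * n values meet (10/20 -> 20/40 -> 40/80), and the resulting m values
 * (40 vs 41) are then compared with the fuzzy clock check.  Ratios whose
 * n values do not differ by an exact power of two never meet and are
 * reported as a mismatch.
 */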
  11013. static bool
  11014. intel_compare_link_m_n(const struct intel_link_m_n *m_n,
  11015. struct intel_link_m_n *m2_n2,
  11016. bool adjust)
  11017. {
  11018. if (m_n->tu == m2_n2->tu &&
  11019. intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
  11020. m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
  11021. intel_compare_m_n(m_n->link_m, m_n->link_n,
  11022. m2_n2->link_m, m2_n2->link_n, !adjust)) {
  11023. if (adjust)
  11024. *m2_n2 = *m_n;
  11025. return true;
  11026. }
  11027. return false;
  11028. }
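/*
 * Note that with adjust == true a fuzzy match also rewrites the second
 * argument with the first (*m2_n2 = *m_n).  Through PIPE_CONF_CHECK_M_N
 * below this means pipe_config inherits current_config's link m/n values
 * whenever the two only differ within the fuzzy tolerance, so the rest of
 * the state comparison sees identical numbers.
 */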
  11029. static bool
  11030. intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  11031. struct intel_crtc_state *current_config,
  11032. struct intel_crtc_state *pipe_config,
  11033. bool adjust)
  11034. {
  11035. bool ret = true;
  11036. #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
  11037. do { \
  11038. if (!adjust) \
  11039. DRM_ERROR(fmt, ##__VA_ARGS__); \
  11040. else \
  11041. DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
  11042. } while (0)
  11043. #define PIPE_CONF_CHECK_X(name) \
  11044. if (current_config->name != pipe_config->name) { \
  11045. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11046. "(expected 0x%08x, found 0x%08x)\n", \
  11047. current_config->name, \
  11048. pipe_config->name); \
  11049. ret = false; \
  11050. }
  11051. #define PIPE_CONF_CHECK_I(name) \
  11052. if (current_config->name != pipe_config->name) { \
  11053. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11054. "(expected %i, found %i)\n", \
  11055. current_config->name, \
  11056. pipe_config->name); \
  11057. ret = false; \
  11058. }
  11059. #define PIPE_CONF_CHECK_P(name) \
  11060. if (current_config->name != pipe_config->name) { \
  11061. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11062. "(expected %p, found %p)\n", \
  11063. current_config->name, \
  11064. pipe_config->name); \
  11065. ret = false; \
  11066. }
  11067. #define PIPE_CONF_CHECK_M_N(name) \
  11068. if (!intel_compare_link_m_n(&current_config->name, \
  11069. &pipe_config->name,\
  11070. adjust)) { \
  11071. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11072. "(expected tu %i gmch %i/%i link %i/%i, " \
  11073. "found tu %i, gmch %i/%i link %i/%i)\n", \
  11074. current_config->name.tu, \
  11075. current_config->name.gmch_m, \
  11076. current_config->name.gmch_n, \
  11077. current_config->name.link_m, \
  11078. current_config->name.link_n, \
  11079. pipe_config->name.tu, \
  11080. pipe_config->name.gmch_m, \
  11081. pipe_config->name.gmch_n, \
  11082. pipe_config->name.link_m, \
  11083. pipe_config->name.link_n); \
  11084. ret = false; \
  11085. }
  11086. /* This is required for BDW+ where there is only one set of registers for
  11087. * switching between high and low RR.
  11088. * This macro can be used whenever a comparison has to be made between one
  11089. * hw state and multiple sw state variables.
  11090. */
  11091. #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
  11092. if (!intel_compare_link_m_n(&current_config->name, \
  11093. &pipe_config->name, adjust) && \
  11094. !intel_compare_link_m_n(&current_config->alt_name, \
  11095. &pipe_config->name, adjust)) { \
  11096. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11097. "(expected tu %i gmch %i/%i link %i/%i, " \
  11098. "or tu %i gmch %i/%i link %i/%i, " \
  11099. "found tu %i, gmch %i/%i link %i/%i)\n", \
  11100. current_config->name.tu, \
  11101. current_config->name.gmch_m, \
  11102. current_config->name.gmch_n, \
  11103. current_config->name.link_m, \
  11104. current_config->name.link_n, \
  11105. current_config->alt_name.tu, \
  11106. current_config->alt_name.gmch_m, \
  11107. current_config->alt_name.gmch_n, \
  11108. current_config->alt_name.link_m, \
  11109. current_config->alt_name.link_n, \
  11110. pipe_config->name.tu, \
  11111. pipe_config->name.gmch_m, \
  11112. pipe_config->name.gmch_n, \
  11113. pipe_config->name.link_m, \
  11114. pipe_config->name.link_n); \
  11115. ret = false; \
  11116. }
  11117. #define PIPE_CONF_CHECK_FLAGS(name, mask) \
  11118. if ((current_config->name ^ pipe_config->name) & (mask)) { \
  11119. INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
  11120. "(expected %i, found %i)\n", \
  11121. current_config->name & (mask), \
  11122. pipe_config->name & (mask)); \
  11123. ret = false; \
  11124. }
  11125. #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
  11126. if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
  11127. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11128. "(expected %i, found %i)\n", \
  11129. current_config->name, \
  11130. pipe_config->name); \
  11131. ret = false; \
  11132. }
  11133. #define PIPE_CONF_QUIRK(quirk) \
  11134. ((current_config->quirks | pipe_config->quirks) & (quirk))
  11135. PIPE_CONF_CHECK_I(cpu_transcoder);
  11136. PIPE_CONF_CHECK_I(has_pch_encoder);
  11137. PIPE_CONF_CHECK_I(fdi_lanes);
  11138. PIPE_CONF_CHECK_M_N(fdi_m_n);
  11139. PIPE_CONF_CHECK_I(lane_count);
  11140. PIPE_CONF_CHECK_X(lane_lat_optim_mask);
  11141. if (INTEL_GEN(dev_priv) < 8) {
  11142. PIPE_CONF_CHECK_M_N(dp_m_n);
  11143. if (current_config->has_drrs)
  11144. PIPE_CONF_CHECK_M_N(dp_m2_n2);
  11145. } else
  11146. PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
  11147. PIPE_CONF_CHECK_X(output_types);
  11148. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
  11149. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
  11150. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
  11151. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
  11152. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
  11153. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
  11154. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
  11155. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
  11156. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
  11157. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
  11158. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
  11159. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
  11160. PIPE_CONF_CHECK_I(pixel_multiplier);
  11161. PIPE_CONF_CHECK_I(has_hdmi_sink);
  11162. if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
  11163. IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  11164. PIPE_CONF_CHECK_I(limited_color_range);
  11165. PIPE_CONF_CHECK_I(has_infoframe);
  11166. PIPE_CONF_CHECK_I(has_audio);
  11167. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11168. DRM_MODE_FLAG_INTERLACE);
  11169. if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
  11170. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11171. DRM_MODE_FLAG_PHSYNC);
  11172. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11173. DRM_MODE_FLAG_NHSYNC);
  11174. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11175. DRM_MODE_FLAG_PVSYNC);
  11176. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11177. DRM_MODE_FLAG_NVSYNC);
  11178. }
  11179. PIPE_CONF_CHECK_X(gmch_pfit.control);
  11180. /* pfit ratios are autocomputed by the hw on gen4+ */
  11181. if (INTEL_GEN(dev_priv) < 4)
  11182. PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
  11183. PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
  11184. if (!adjust) {
  11185. PIPE_CONF_CHECK_I(pipe_src_w);
  11186. PIPE_CONF_CHECK_I(pipe_src_h);
  11187. PIPE_CONF_CHECK_I(pch_pfit.enabled);
  11188. if (current_config->pch_pfit.enabled) {
  11189. PIPE_CONF_CHECK_X(pch_pfit.pos);
  11190. PIPE_CONF_CHECK_X(pch_pfit.size);
  11191. }
  11192. PIPE_CONF_CHECK_I(scaler_state.scaler_id);
  11193. }
  11194. /* BDW+ don't expose a synchronous way to read the state */
  11195. if (IS_HASWELL(dev_priv))
  11196. PIPE_CONF_CHECK_I(ips_enabled);
  11197. PIPE_CONF_CHECK_I(double_wide);
  11198. PIPE_CONF_CHECK_P(shared_dpll);
  11199. PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
  11200. PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
  11201. PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
  11202. PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
  11203. PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
  11204. PIPE_CONF_CHECK_X(dpll_hw_state.spll);
  11205. PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
  11206. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
  11207. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
  11208. PIPE_CONF_CHECK_X(dsi_pll.ctrl);
  11209. PIPE_CONF_CHECK_X(dsi_pll.div);
  11210. if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
  11211. PIPE_CONF_CHECK_I(pipe_bpp);
  11212. PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
  11213. PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
  11214. #undef PIPE_CONF_CHECK_X
  11215. #undef PIPE_CONF_CHECK_I
  11216. #undef PIPE_CONF_CHECK_P
  11217. #undef PIPE_CONF_CHECK_FLAGS
  11218. #undef PIPE_CONF_CHECK_CLOCK_FUZZY
  11219. #undef PIPE_CONF_QUIRK
  11220. #undef INTEL_ERR_OR_DBG_KMS
  11221. return ret;
  11222. }
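/*
 * Callers visible in this file use the compare helper above in two modes:
 * intel_atomic_check() passes adjust == true (only with i915.fastboot) to
 * decide whether a requested modeset can be downgraded to a fastset, while
 * verify_crtc_state() passes adjust == false after a commit, where any
 * difference between the software state and the state read back from the
 * hardware is reported as an error.
 */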
  11223. static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
  11224. const struct intel_crtc_state *pipe_config)
  11225. {
  11226. if (pipe_config->has_pch_encoder) {
  11227. int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  11228. &pipe_config->fdi_m_n);
  11229. int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
  11230. /*
  11231. * FDI already provided one idea for the dotclock.
  11232. * Yell if the encoder disagrees.
  11233. */
  11234. WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
  11235. "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
  11236. fdi_dotclock, dotclock);
  11237. }
  11238. }
  11239. static void verify_wm_state(struct drm_crtc *crtc,
  11240. struct drm_crtc_state *new_state)
  11241. {
  11242. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  11243. struct skl_ddb_allocation hw_ddb, *sw_ddb;
  11244. struct skl_pipe_wm hw_wm, *sw_wm;
  11245. struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
  11246. struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
  11247. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11248. const enum pipe pipe = intel_crtc->pipe;
  11249. int plane, level, max_level = ilk_wm_max_level(dev_priv);
  11250. if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
  11251. return;
  11252. skl_pipe_wm_get_hw_state(crtc, &hw_wm);
  11253. sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
  11254. skl_ddb_get_hw_state(dev_priv, &hw_ddb);
  11255. sw_ddb = &dev_priv->wm.skl_hw.ddb;
  11256. /* planes */
  11257. for_each_universal_plane(dev_priv, pipe, plane) {
  11258. hw_plane_wm = &hw_wm.planes[plane];
  11259. sw_plane_wm = &sw_wm->planes[plane];
  11260. /* Watermarks */
  11261. for (level = 0; level <= max_level; level++) {
  11262. if (skl_wm_level_equals(&hw_plane_wm->wm[level],
  11263. &sw_plane_wm->wm[level]))
  11264. continue;
  11265. DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11266. pipe_name(pipe), plane + 1, level,
  11267. sw_plane_wm->wm[level].plane_en,
  11268. sw_plane_wm->wm[level].plane_res_b,
  11269. sw_plane_wm->wm[level].plane_res_l,
  11270. hw_plane_wm->wm[level].plane_en,
  11271. hw_plane_wm->wm[level].plane_res_b,
  11272. hw_plane_wm->wm[level].plane_res_l);
  11273. }
  11274. if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
  11275. &sw_plane_wm->trans_wm)) {
  11276. DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11277. pipe_name(pipe), plane + 1,
  11278. sw_plane_wm->trans_wm.plane_en,
  11279. sw_plane_wm->trans_wm.plane_res_b,
  11280. sw_plane_wm->trans_wm.plane_res_l,
  11281. hw_plane_wm->trans_wm.plane_en,
  11282. hw_plane_wm->trans_wm.plane_res_b,
  11283. hw_plane_wm->trans_wm.plane_res_l);
  11284. }
  11285. /* DDB */
  11286. hw_ddb_entry = &hw_ddb.plane[pipe][plane];
  11287. sw_ddb_entry = &sw_ddb->plane[pipe][plane];
  11288. if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
  11289. DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
  11290. pipe_name(pipe), plane + 1,
  11291. sw_ddb_entry->start, sw_ddb_entry->end,
  11292. hw_ddb_entry->start, hw_ddb_entry->end);
  11293. }
  11294. }
  11295. /*
  11296. * cursor
11297. * If the cursor plane isn't active, we may not have updated its ddb
11298. * allocation. In that case, since the ddb allocation will be updated
11299. * once the plane becomes visible, we can skip this check.
  11300. */
  11301. if (intel_crtc->cursor_addr) {
  11302. hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
  11303. sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
  11304. /* Watermarks */
  11305. for (level = 0; level <= max_level; level++) {
  11306. if (skl_wm_level_equals(&hw_plane_wm->wm[level],
  11307. &sw_plane_wm->wm[level]))
  11308. continue;
  11309. DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11310. pipe_name(pipe), level,
  11311. sw_plane_wm->wm[level].plane_en,
  11312. sw_plane_wm->wm[level].plane_res_b,
  11313. sw_plane_wm->wm[level].plane_res_l,
  11314. hw_plane_wm->wm[level].plane_en,
  11315. hw_plane_wm->wm[level].plane_res_b,
  11316. hw_plane_wm->wm[level].plane_res_l);
  11317. }
  11318. if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
  11319. &sw_plane_wm->trans_wm)) {
  11320. DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11321. pipe_name(pipe),
  11322. sw_plane_wm->trans_wm.plane_en,
  11323. sw_plane_wm->trans_wm.plane_res_b,
  11324. sw_plane_wm->trans_wm.plane_res_l,
  11325. hw_plane_wm->trans_wm.plane_en,
  11326. hw_plane_wm->trans_wm.plane_res_b,
  11327. hw_plane_wm->trans_wm.plane_res_l);
  11328. }
  11329. /* DDB */
  11330. hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
  11331. sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
  11332. if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
  11333. DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
  11334. pipe_name(pipe),
  11335. sw_ddb_entry->start, sw_ddb_entry->end,
  11336. hw_ddb_entry->start, hw_ddb_entry->end);
  11337. }
  11338. }
  11339. }
  11340. static void
  11341. verify_connector_state(struct drm_device *dev,
  11342. struct drm_atomic_state *state,
  11343. struct drm_crtc *crtc)
  11344. {
  11345. struct drm_connector *connector;
  11346. struct drm_connector_state *old_conn_state;
  11347. int i;
  11348. for_each_connector_in_state(state, connector, old_conn_state, i) {
  11349. struct drm_encoder *encoder = connector->encoder;
  11350. struct drm_connector_state *state = connector->state;
  11351. if (state->crtc != crtc)
  11352. continue;
  11353. intel_connector_verify_state(to_intel_connector(connector));
  11354. I915_STATE_WARN(state->best_encoder != encoder,
  11355. "connector's atomic encoder doesn't match legacy encoder\n");
  11356. }
  11357. }
  11358. static void
  11359. verify_encoder_state(struct drm_device *dev)
  11360. {
  11361. struct intel_encoder *encoder;
  11362. struct intel_connector *connector;
  11363. for_each_intel_encoder(dev, encoder) {
  11364. bool enabled = false;
  11365. enum pipe pipe;
  11366. DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
  11367. encoder->base.base.id,
  11368. encoder->base.name);
  11369. for_each_intel_connector(dev, connector) {
  11370. if (connector->base.state->best_encoder != &encoder->base)
  11371. continue;
  11372. enabled = true;
  11373. I915_STATE_WARN(connector->base.state->crtc !=
  11374. encoder->base.crtc,
  11375. "connector's crtc doesn't match encoder crtc\n");
  11376. }
  11377. I915_STATE_WARN(!!encoder->base.crtc != enabled,
  11378. "encoder's enabled state mismatch "
  11379. "(expected %i, found %i)\n",
  11380. !!encoder->base.crtc, enabled);
  11381. if (!encoder->base.crtc) {
  11382. bool active;
  11383. active = encoder->get_hw_state(encoder, &pipe);
  11384. I915_STATE_WARN(active,
  11385. "encoder detached but still enabled on pipe %c.\n",
  11386. pipe_name(pipe));
  11387. }
  11388. }
  11389. }
  11390. static void
  11391. verify_crtc_state(struct drm_crtc *crtc,
  11392. struct drm_crtc_state *old_crtc_state,
  11393. struct drm_crtc_state *new_crtc_state)
  11394. {
  11395. struct drm_device *dev = crtc->dev;
  11396. struct drm_i915_private *dev_priv = to_i915(dev);
  11397. struct intel_encoder *encoder;
  11398. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11399. struct intel_crtc_state *pipe_config, *sw_config;
  11400. struct drm_atomic_state *old_state;
  11401. bool active;
  11402. old_state = old_crtc_state->state;
  11403. __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
  11404. pipe_config = to_intel_crtc_state(old_crtc_state);
  11405. memset(pipe_config, 0, sizeof(*pipe_config));
  11406. pipe_config->base.crtc = crtc;
  11407. pipe_config->base.state = old_state;
  11408. DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
  11409. active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
  11410. /* hw state is inconsistent with the pipe quirk */
  11411. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  11412. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  11413. active = new_crtc_state->active;
  11414. I915_STATE_WARN(new_crtc_state->active != active,
  11415. "crtc active state doesn't match with hw state "
  11416. "(expected %i, found %i)\n", new_crtc_state->active, active);
  11417. I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
  11418. "transitional active state does not match atomic hw state "
  11419. "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
  11420. for_each_encoder_on_crtc(dev, crtc, encoder) {
  11421. enum pipe pipe;
  11422. active = encoder->get_hw_state(encoder, &pipe);
  11423. I915_STATE_WARN(active != new_crtc_state->active,
  11424. "[ENCODER:%i] active %i with crtc active %i\n",
  11425. encoder->base.base.id, active, new_crtc_state->active);
  11426. I915_STATE_WARN(active && intel_crtc->pipe != pipe,
  11427. "Encoder connected to wrong pipe %c\n",
  11428. pipe_name(pipe));
  11429. if (active) {
  11430. pipe_config->output_types |= 1 << encoder->type;
  11431. encoder->get_config(encoder, pipe_config);
  11432. }
  11433. }
  11434. if (!new_crtc_state->active)
  11435. return;
  11436. intel_pipe_config_sanity_check(dev_priv, pipe_config);
  11437. sw_config = to_intel_crtc_state(crtc->state);
  11438. if (!intel_pipe_config_compare(dev_priv, sw_config,
  11439. pipe_config, false)) {
  11440. I915_STATE_WARN(1, "pipe state doesn't match!\n");
  11441. intel_dump_pipe_config(intel_crtc, pipe_config,
  11442. "[hw state]");
  11443. intel_dump_pipe_config(intel_crtc, sw_config,
  11444. "[sw state]");
  11445. }
  11446. }
  11447. static void
  11448. verify_single_dpll_state(struct drm_i915_private *dev_priv,
  11449. struct intel_shared_dpll *pll,
  11450. struct drm_crtc *crtc,
  11451. struct drm_crtc_state *new_state)
  11452. {
  11453. struct intel_dpll_hw_state dpll_hw_state;
  11454. unsigned crtc_mask;
  11455. bool active;
  11456. memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
  11457. DRM_DEBUG_KMS("%s\n", pll->name);
  11458. active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
  11459. if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
  11460. I915_STATE_WARN(!pll->on && pll->active_mask,
  11461. "pll in active use but not on in sw tracking\n");
  11462. I915_STATE_WARN(pll->on && !pll->active_mask,
  11463. "pll is on but not used by any active crtc\n");
  11464. I915_STATE_WARN(pll->on != active,
  11465. "pll on state mismatch (expected %i, found %i)\n",
  11466. pll->on, active);
  11467. }
  11468. if (!crtc) {
  11469. I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
  11470. "more active pll users than references: %x vs %x\n",
  11471. pll->active_mask, pll->config.crtc_mask);
  11472. return;
  11473. }
  11474. crtc_mask = 1 << drm_crtc_index(crtc);
  11475. if (new_state->active)
  11476. I915_STATE_WARN(!(pll->active_mask & crtc_mask),
  11477. "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
  11478. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  11479. else
  11480. I915_STATE_WARN(pll->active_mask & crtc_mask,
  11481. "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
  11482. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  11483. I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
  11484. "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
  11485. crtc_mask, pll->config.crtc_mask);
  11486. I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
  11487. &dpll_hw_state,
  11488. sizeof(dpll_hw_state)),
  11489. "pll hw state mismatch\n");
  11490. }
  11491. static void
  11492. verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
  11493. struct drm_crtc_state *old_crtc_state,
  11494. struct drm_crtc_state *new_crtc_state)
  11495. {
  11496. struct drm_i915_private *dev_priv = to_i915(dev);
  11497. struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
  11498. struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
  11499. if (new_state->shared_dpll)
  11500. verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
  11501. if (old_state->shared_dpll &&
  11502. old_state->shared_dpll != new_state->shared_dpll) {
  11503. unsigned crtc_mask = 1 << drm_crtc_index(crtc);
  11504. struct intel_shared_dpll *pll = old_state->shared_dpll;
  11505. I915_STATE_WARN(pll->active_mask & crtc_mask,
  11506. "pll active mismatch (didn't expect pipe %c in active mask)\n",
  11507. pipe_name(drm_crtc_index(crtc)));
  11508. I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
  11509. "pll enabled crtcs mismatch (found %x in enabled mask)\n",
  11510. pipe_name(drm_crtc_index(crtc)));
  11511. }
  11512. }
  11513. static void
  11514. intel_modeset_verify_crtc(struct drm_crtc *crtc,
  11515. struct drm_atomic_state *state,
  11516. struct drm_crtc_state *old_state,
  11517. struct drm_crtc_state *new_state)
  11518. {
  11519. if (!needs_modeset(new_state) &&
  11520. !to_intel_crtc_state(new_state)->update_pipe)
  11521. return;
  11522. verify_wm_state(crtc, new_state);
  11523. verify_connector_state(crtc->dev, state, crtc);
  11524. verify_crtc_state(crtc, old_state, new_state);
  11525. verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
  11526. }
  11527. static void
  11528. verify_disabled_dpll_state(struct drm_device *dev)
  11529. {
  11530. struct drm_i915_private *dev_priv = to_i915(dev);
  11531. int i;
  11532. for (i = 0; i < dev_priv->num_shared_dpll; i++)
  11533. verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
  11534. }
  11535. static void
  11536. intel_modeset_verify_disabled(struct drm_device *dev,
  11537. struct drm_atomic_state *state)
  11538. {
  11539. verify_encoder_state(dev);
  11540. verify_connector_state(dev, state, NULL);
  11541. verify_disabled_dpll_state(dev);
  11542. }
  11543. static void update_scanline_offset(struct intel_crtc *crtc)
  11544. {
  11545. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  11546. /*
  11547. * The scanline counter increments at the leading edge of hsync.
  11548. *
  11549. * On most platforms it starts counting from vtotal-1 on the
  11550. * first active line. That means the scanline counter value is
  11551. * always one less than what we would expect. Ie. just after
  11552. * start of vblank, which also occurs at start of hsync (on the
  11553. * last active line), the scanline counter will read vblank_start-1.
  11554. *
  11555. * On gen2 the scanline counter starts counting from 1 instead
  11556. * of vtotal-1, so we have to subtract one (or rather add vtotal-1
  11557. * to keep the value positive), instead of adding one.
  11558. *
  11559. * On HSW+ the behaviour of the scanline counter depends on the output
  11560. * type. For DP ports it behaves like most other platforms, but on HDMI
  11561. * there's an extra 1 line difference. So we need to add two instead of
  11562. * one to the value.
  11563. */
  11564. if (IS_GEN2(dev_priv)) {
  11565. const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
  11566. int vtotal;
  11567. vtotal = adjusted_mode->crtc_vtotal;
  11568. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
  11569. vtotal /= 2;
  11570. crtc->scanline_offset = vtotal - 1;
  11571. } else if (HAS_DDI(dev_priv) &&
  11572. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
  11573. crtc->scanline_offset = 2;
  11574. } else
  11575. crtc->scanline_offset = 1;
  11576. }
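/*
 * Concrete values for the rules above, as an illustration: a gen2 pipe
 * running a progressive mode with crtc_vtotal == 525 gets
 * scanline_offset == 524 (equivalent to subtracting one modulo vtotal),
 * an HDMI output on a DDI platform gets 2, and every other case gets 1.
 */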
  11577. static void intel_modeset_clear_plls(struct drm_atomic_state *state)
  11578. {
  11579. struct drm_device *dev = state->dev;
  11580. struct drm_i915_private *dev_priv = to_i915(dev);
  11581. struct intel_shared_dpll_config *shared_dpll = NULL;
  11582. struct drm_crtc *crtc;
  11583. struct drm_crtc_state *crtc_state;
  11584. int i;
  11585. if (!dev_priv->display.crtc_compute_clock)
  11586. return;
  11587. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11588. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11589. struct intel_shared_dpll *old_dpll =
  11590. to_intel_crtc_state(crtc->state)->shared_dpll;
  11591. if (!needs_modeset(crtc_state))
  11592. continue;
  11593. to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
  11594. if (!old_dpll)
  11595. continue;
  11596. if (!shared_dpll)
  11597. shared_dpll = intel_atomic_get_shared_dpll_state(state);
  11598. intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
  11599. }
  11600. }
  11601. /*
  11602. * This implements the workaround described in the "notes" section of the mode
  11603. * set sequence documentation. When going from no pipes or single pipe to
  11604. * multiple pipes, and planes are enabled after the pipe, we need to wait at
  11605. * least 2 vblanks on the first pipe before enabling planes on the second pipe.
  11606. */
  11607. static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
  11608. {
  11609. struct drm_crtc_state *crtc_state;
  11610. struct intel_crtc *intel_crtc;
  11611. struct drm_crtc *crtc;
  11612. struct intel_crtc_state *first_crtc_state = NULL;
  11613. struct intel_crtc_state *other_crtc_state = NULL;
  11614. enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
  11615. int i;
11616. /* look at all crtcs that are going to be enabled during the modeset */
  11617. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11618. intel_crtc = to_intel_crtc(crtc);
  11619. if (!crtc_state->active || !needs_modeset(crtc_state))
  11620. continue;
  11621. if (first_crtc_state) {
  11622. other_crtc_state = to_intel_crtc_state(crtc_state);
  11623. break;
  11624. } else {
  11625. first_crtc_state = to_intel_crtc_state(crtc_state);
  11626. first_pipe = intel_crtc->pipe;
  11627. }
  11628. }
  11629. /* No workaround needed? */
  11630. if (!first_crtc_state)
  11631. return 0;
11632. /* w/a possibly needed, check how many crtcs are already enabled. */
  11633. for_each_intel_crtc(state->dev, intel_crtc) {
  11634. struct intel_crtc_state *pipe_config;
  11635. pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
  11636. if (IS_ERR(pipe_config))
  11637. return PTR_ERR(pipe_config);
  11638. pipe_config->hsw_workaround_pipe = INVALID_PIPE;
  11639. if (!pipe_config->base.active ||
  11640. needs_modeset(&pipe_config->base))
  11641. continue;
  11642. /* 2 or more enabled crtcs means no need for w/a */
  11643. if (enabled_pipe != INVALID_PIPE)
  11644. return 0;
  11645. enabled_pipe = intel_crtc->pipe;
  11646. }
  11647. if (enabled_pipe != INVALID_PIPE)
  11648. first_crtc_state->hsw_workaround_pipe = enabled_pipe;
  11649. else if (other_crtc_state)
  11650. other_crtc_state->hsw_workaround_pipe = first_pipe;
  11651. return 0;
  11652. }
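/*
 * Example scenario for the workaround above (hypothetical pipes): if pipe A
 * stays running untouched by this modeset while pipe B is being enabled,
 * B's hsw_workaround_pipe is set to A so that plane enabling on B waits for
 * vblanks on A.  If instead two pipes are enabled in the same modeset
 * starting from none, the second one found waits on the first.
 */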
  11653. static int intel_lock_all_pipes(struct drm_atomic_state *state)
  11654. {
  11655. struct drm_crtc *crtc;
  11656. /* Add all pipes to the state */
  11657. for_each_crtc(state->dev, crtc) {
  11658. struct drm_crtc_state *crtc_state;
  11659. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  11660. if (IS_ERR(crtc_state))
  11661. return PTR_ERR(crtc_state);
  11662. }
  11663. return 0;
  11664. }
  11665. static int intel_modeset_all_pipes(struct drm_atomic_state *state)
  11666. {
  11667. struct drm_crtc *crtc;
  11668. /*
  11669. * Add all pipes to the state, and force
  11670. * a modeset on all the active ones.
  11671. */
  11672. for_each_crtc(state->dev, crtc) {
  11673. struct drm_crtc_state *crtc_state;
  11674. int ret;
  11675. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  11676. if (IS_ERR(crtc_state))
  11677. return PTR_ERR(crtc_state);
  11678. if (!crtc_state->active || needs_modeset(crtc_state))
  11679. continue;
  11680. crtc_state->mode_changed = true;
  11681. ret = drm_atomic_add_affected_connectors(state, crtc);
  11682. if (ret)
  11683. return ret;
  11684. ret = drm_atomic_add_affected_planes(state, crtc);
  11685. if (ret)
  11686. return ret;
  11687. }
  11688. return 0;
  11689. }
  11690. static int intel_modeset_checks(struct drm_atomic_state *state)
  11691. {
  11692. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11693. struct drm_i915_private *dev_priv = to_i915(state->dev);
  11694. struct drm_crtc *crtc;
  11695. struct drm_crtc_state *crtc_state;
  11696. int ret = 0, i;
  11697. if (!check_digital_port_conflicts(state)) {
  11698. DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
  11699. return -EINVAL;
  11700. }
  11701. intel_state->modeset = true;
  11702. intel_state->active_crtcs = dev_priv->active_crtcs;
  11703. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11704. if (crtc_state->active)
  11705. intel_state->active_crtcs |= 1 << i;
  11706. else
  11707. intel_state->active_crtcs &= ~(1 << i);
  11708. if (crtc_state->active != crtc->state->active)
  11709. intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
  11710. }
  11711. /*
  11712. * See if the config requires any additional preparation, e.g.
  11713. * to adjust global state with pipes off. We need to do this
  11714. * here so we can get the modeset_pipe updated config for the new
  11715. * mode set on this crtc. For other crtcs we need to use the
  11716. * adjusted_mode bits in the crtc directly.
  11717. */
  11718. if (dev_priv->display.modeset_calc_cdclk) {
  11719. if (!intel_state->cdclk_pll_vco)
  11720. intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
  11721. if (!intel_state->cdclk_pll_vco)
  11722. intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
  11723. ret = dev_priv->display.modeset_calc_cdclk(state);
  11724. if (ret < 0)
  11725. return ret;
  11726. /*
11727. * Writes to dev_priv->atomic_cdclk_freq must be protected by
  11728. * holding all the crtc locks, even if we don't end up
  11729. * touching the hardware
  11730. */
  11731. if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) {
  11732. ret = intel_lock_all_pipes(state);
  11733. if (ret < 0)
  11734. return ret;
  11735. }
  11736. /* All pipes must be switched off while we change the cdclk. */
  11737. if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
  11738. intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) {
  11739. ret = intel_modeset_all_pipes(state);
  11740. if (ret < 0)
  11741. return ret;
  11742. }
  11743. DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
  11744. intel_state->cdclk, intel_state->dev_cdclk);
  11745. } else {
  11746. to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
  11747. }
  11748. intel_modeset_clear_plls(state);
  11749. if (IS_HASWELL(dev_priv))
  11750. return haswell_mode_set_planes_workaround(state);
  11751. return 0;
  11752. }
  11753. /*
  11754. * Handle calculation of various watermark data at the end of the atomic check
  11755. * phase. The code here should be run after the per-crtc and per-plane 'check'
  11756. * handlers to ensure that all derived state has been updated.
  11757. */
  11758. static int calc_watermark_data(struct drm_atomic_state *state)
  11759. {
  11760. struct drm_device *dev = state->dev;
  11761. struct drm_i915_private *dev_priv = to_i915(dev);
  11762. /* Is there platform-specific watermark information to calculate? */
  11763. if (dev_priv->display.compute_global_watermarks)
  11764. return dev_priv->display.compute_global_watermarks(state);
  11765. return 0;
  11766. }
  11767. /**
  11768. * intel_atomic_check - validate state object
  11769. * @dev: drm device
  11770. * @state: state to validate
  11771. */
  11772. static int intel_atomic_check(struct drm_device *dev,
  11773. struct drm_atomic_state *state)
  11774. {
  11775. struct drm_i915_private *dev_priv = to_i915(dev);
  11776. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11777. struct drm_crtc *crtc;
  11778. struct drm_crtc_state *crtc_state;
  11779. int ret, i;
  11780. bool any_ms = false;
  11781. ret = drm_atomic_helper_check_modeset(dev, state);
  11782. if (ret)
  11783. return ret;
  11784. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11785. struct intel_crtc_state *pipe_config =
  11786. to_intel_crtc_state(crtc_state);
  11787. /* Catch I915_MODE_FLAG_INHERITED */
  11788. if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
  11789. crtc_state->mode_changed = true;
  11790. if (!needs_modeset(crtc_state))
  11791. continue;
  11792. if (!crtc_state->enable) {
  11793. any_ms = true;
  11794. continue;
  11795. }
  11796. /* FIXME: For only active_changed we shouldn't need to do any
  11797. * state recomputation at all. */
  11798. ret = drm_atomic_add_affected_connectors(state, crtc);
  11799. if (ret)
  11800. return ret;
  11801. ret = intel_modeset_pipe_config(crtc, pipe_config);
  11802. if (ret) {
  11803. intel_dump_pipe_config(to_intel_crtc(crtc),
  11804. pipe_config, "[failed]");
  11805. return ret;
  11806. }
  11807. if (i915.fastboot &&
  11808. intel_pipe_config_compare(dev_priv,
  11809. to_intel_crtc_state(crtc->state),
  11810. pipe_config, true)) {
  11811. crtc_state->mode_changed = false;
  11812. to_intel_crtc_state(crtc_state)->update_pipe = true;
  11813. }
  11814. if (needs_modeset(crtc_state))
  11815. any_ms = true;
  11816. ret = drm_atomic_add_affected_planes(state, crtc);
  11817. if (ret)
  11818. return ret;
  11819. intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
  11820. needs_modeset(crtc_state) ?
  11821. "[modeset]" : "[fastset]");
  11822. }
  11823. if (any_ms) {
  11824. ret = intel_modeset_checks(state);
  11825. if (ret)
  11826. return ret;
  11827. } else {
  11828. intel_state->cdclk = dev_priv->atomic_cdclk_freq;
  11829. }
  11830. ret = drm_atomic_helper_check_planes(dev, state);
  11831. if (ret)
  11832. return ret;
  11833. intel_fbc_choose_crtc(dev_priv, state);
  11834. return calc_watermark_data(state);
  11835. }
  11836. static int intel_atomic_prepare_commit(struct drm_device *dev,
  11837. struct drm_atomic_state *state)
  11838. {
  11839. struct drm_i915_private *dev_priv = to_i915(dev);
  11840. struct drm_crtc_state *crtc_state;
  11841. struct drm_crtc *crtc;
  11842. int i, ret;
  11843. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11844. if (state->legacy_cursor_update)
  11845. continue;
  11846. ret = intel_crtc_wait_for_pending_flips(crtc);
  11847. if (ret)
  11848. return ret;
  11849. if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
  11850. flush_workqueue(dev_priv->wq);
  11851. }
  11852. ret = mutex_lock_interruptible(&dev->struct_mutex);
  11853. if (ret)
  11854. return ret;
  11855. ret = drm_atomic_helper_prepare_planes(dev, state);
  11856. mutex_unlock(&dev->struct_mutex);
  11857. return ret;
  11858. }
  11859. u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
  11860. {
  11861. struct drm_device *dev = crtc->base.dev;
  11862. if (!dev->max_vblank_count)
  11863. return drm_accurate_vblank_count(&crtc->base);
  11864. return dev->driver->get_vblank_counter(dev, crtc->pipe);
  11865. }
  11866. static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
  11867. struct drm_i915_private *dev_priv,
  11868. unsigned crtc_mask)
  11869. {
  11870. unsigned last_vblank_count[I915_MAX_PIPES];
  11871. enum pipe pipe;
  11872. int ret;
  11873. if (!crtc_mask)
  11874. return;
  11875. for_each_pipe(dev_priv, pipe) {
  11876. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
  11877. pipe);
  11878. if (!((1 << pipe) & crtc_mask))
  11879. continue;
  11880. ret = drm_crtc_vblank_get(&crtc->base);
  11881. if (WARN_ON(ret != 0)) {
  11882. crtc_mask &= ~(1 << pipe);
  11883. continue;
  11884. }
  11885. last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
  11886. }
  11887. for_each_pipe(dev_priv, pipe) {
  11888. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
  11889. pipe);
  11890. long lret;
  11891. if (!((1 << pipe) & crtc_mask))
  11892. continue;
  11893. lret = wait_event_timeout(dev->vblank[pipe].queue,
  11894. last_vblank_count[pipe] !=
  11895. drm_crtc_vblank_count(&crtc->base),
  11896. msecs_to_jiffies(50));
  11897. WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
  11898. drm_crtc_vblank_put(&crtc->base);
  11899. }
  11900. }
  11901. static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
  11902. {
  11903. /* fb updated, need to unpin old fb */
  11904. if (crtc_state->fb_changed)
  11905. return true;
  11906. /* wm changes, need vblank before final wm's */
  11907. if (crtc_state->update_wm_post)
  11908. return true;
  11909. /*
  11910. * cxsr is re-enabled after vblank.
  11911. * This is already handled by crtc_state->update_wm_post,
  11912. * but added for clarity.
  11913. */
  11914. if (crtc_state->disable_cxsr)
  11915. return true;
  11916. return false;
  11917. }
  11918. static void intel_update_crtc(struct drm_crtc *crtc,
  11919. struct drm_atomic_state *state,
  11920. struct drm_crtc_state *old_crtc_state,
  11921. unsigned int *crtc_vblank_mask)
  11922. {
  11923. struct drm_device *dev = crtc->dev;
  11924. struct drm_i915_private *dev_priv = to_i915(dev);
  11925. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11926. struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
  11927. bool modeset = needs_modeset(crtc->state);
  11928. if (modeset) {
  11929. update_scanline_offset(intel_crtc);
  11930. dev_priv->display.crtc_enable(pipe_config, state);
  11931. } else {
  11932. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  11933. }
  11934. if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
  11935. intel_fbc_enable(
  11936. intel_crtc, pipe_config,
  11937. to_intel_plane_state(crtc->primary->state));
  11938. }
  11939. drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
  11940. if (needs_vblank_wait(pipe_config))
  11941. *crtc_vblank_mask |= drm_crtc_mask(crtc);
  11942. }
  11943. static void intel_update_crtcs(struct drm_atomic_state *state,
  11944. unsigned int *crtc_vblank_mask)
  11945. {
  11946. struct drm_crtc *crtc;
  11947. struct drm_crtc_state *old_crtc_state;
  11948. int i;
  11949. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11950. if (!crtc->state->active)
  11951. continue;
  11952. intel_update_crtc(crtc, state, old_crtc_state,
  11953. crtc_vblank_mask);
  11954. }
  11955. }
  11956. static void skl_update_crtcs(struct drm_atomic_state *state,
  11957. unsigned int *crtc_vblank_mask)
  11958. {
  11959. struct drm_i915_private *dev_priv = to_i915(state->dev);
  11960. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11961. struct drm_crtc *crtc;
  11962. struct intel_crtc *intel_crtc;
  11963. struct drm_crtc_state *old_crtc_state;
  11964. struct intel_crtc_state *cstate;
  11965. unsigned int updated = 0;
  11966. bool progress;
  11967. enum pipe pipe;
  11968. int i;
  11969. const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
  11970. for_each_crtc_in_state(state, crtc, old_crtc_state, i)
11971. /* ignore allocations for crtcs that have been turned off. */
  11972. if (crtc->state->active)
  11973. entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
  11974. /*
  11975. * Whenever the number of active pipes changes, we need to make sure we
  11976. * update the pipes in the right order so that their ddb allocations
11977. * never overlap with each other in between CRTC updates. Otherwise we'll
  11978. * cause pipe underruns and other bad stuff.
  11979. */
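/*
 * As an illustration: if pipe A shrinks its DDB allocation and pipe B grows
 * into the space A gives up, A must be flushed first and, via the vblank
 * wait below, its smaller allocation must have taken effect before B is
 * programmed; otherwise the two allocations would briefly overlap.
 */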
  11980. do {
  11981. progress = false;
  11982. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11983. bool vbl_wait = false;
  11984. unsigned int cmask = drm_crtc_mask(crtc);
  11985. intel_crtc = to_intel_crtc(crtc);
  11986. cstate = to_intel_crtc_state(crtc->state);
  11987. pipe = intel_crtc->pipe;
  11988. if (updated & cmask || !cstate->base.active)
  11989. continue;
  11990. if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
  11991. continue;
  11992. updated |= cmask;
  11993. entries[i] = &cstate->wm.skl.ddb;
  11994. /*
11995. * If this is an already active pipe, its DDB changed,
  11996. * and this isn't the last pipe that needs updating
  11997. * then we need to wait for a vblank to pass for the
  11998. * new ddb allocation to take effect.
  11999. */
  12000. if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
  12001. &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
  12002. !crtc->state->active_changed &&
  12003. intel_state->wm_results.dirty_pipes != updated)
  12004. vbl_wait = true;
  12005. intel_update_crtc(crtc, state, old_crtc_state,
  12006. crtc_vblank_mask);
  12007. if (vbl_wait)
  12008. intel_wait_for_vblank(dev_priv, pipe);
  12009. progress = true;
  12010. }
  12011. } while (progress);
  12012. }
  12013. static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  12014. {
  12015. struct drm_device *dev = state->dev;
  12016. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  12017. struct drm_i915_private *dev_priv = to_i915(dev);
  12018. struct drm_crtc_state *old_crtc_state;
  12019. struct drm_crtc *crtc;
  12020. struct intel_crtc_state *intel_cstate;
  12021. bool hw_check = intel_state->modeset;
  12022. unsigned long put_domains[I915_MAX_PIPES] = {};
  12023. unsigned crtc_vblank_mask = 0;
  12024. int i;
  12025. drm_atomic_helper_wait_for_dependencies(state);
  12026. if (intel_state->modeset)
  12027. intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
  12028. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12029. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  12030. if (needs_modeset(crtc->state) ||
  12031. to_intel_crtc_state(crtc->state)->update_pipe) {
  12032. hw_check = true;
  12033. put_domains[to_intel_crtc(crtc)->pipe] =
  12034. modeset_get_crtc_power_domains(crtc,
  12035. to_intel_crtc_state(crtc->state));
  12036. }
  12037. if (!needs_modeset(crtc->state))
  12038. continue;
  12039. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  12040. if (old_crtc_state->active) {
  12041. intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
  12042. dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
  12043. intel_crtc->active = false;
  12044. intel_fbc_disable(intel_crtc);
  12045. intel_disable_shared_dpll(intel_crtc);
  12046. /*
  12047. * Underruns don't always raise
  12048. * interrupts, so check manually.
  12049. */
  12050. intel_check_cpu_fifo_underruns(dev_priv);
  12051. intel_check_pch_fifo_underruns(dev_priv);
  12052. if (!crtc->state->active) {
  12053. /*
  12054. * Make sure we don't call initial_watermarks
  12055. * for ILK-style watermark updates.
  12056. */
  12057. if (dev_priv->display.atomic_update_watermarks)
  12058. dev_priv->display.initial_watermarks(intel_state,
  12059. to_intel_crtc_state(crtc->state));
  12060. else
  12061. intel_update_watermarks(intel_crtc);
  12062. }
  12063. }
  12064. }
  12065. /* Only after disabling all output pipelines that will be changed can we
12066. * update the output configuration. */
  12067. intel_modeset_update_crtc_state(state);
  12068. if (intel_state->modeset) {
  12069. drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
  12070. if (dev_priv->display.modeset_commit_cdclk &&
  12071. (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
  12072. intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
  12073. dev_priv->display.modeset_commit_cdclk(state);
  12074. /*
  12075. * SKL workaround: bspec recommends we disable the SAGV when we
12076. * have more than one pipe enabled
  12077. */
  12078. if (!intel_can_enable_sagv(state))
  12079. intel_disable_sagv(dev_priv);
  12080. intel_modeset_verify_disabled(dev, state);
  12081. }
  12082. /* Complete the events for pipes that have now been disabled */
  12083. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12084. bool modeset = needs_modeset(crtc->state);
12085. /* Complete events for now-disabled pipes here. */
  12086. if (modeset && !crtc->state->active && crtc->state->event) {
  12087. spin_lock_irq(&dev->event_lock);
  12088. drm_crtc_send_vblank_event(crtc, crtc->state->event);
  12089. spin_unlock_irq(&dev->event_lock);
  12090. crtc->state->event = NULL;
  12091. }
  12092. }
  12093. /* Now enable the clocks, plane, pipe, and connectors that we set up. */
  12094. dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
  12095. /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
  12096. * already, but still need the state for the delayed optimization. To
  12097. * fix this:
  12098. * - wrap the optimization/post_plane_update stuff into a per-crtc work.
  12099. * - schedule that vblank worker _before_ calling hw_done
12100. * - at the start of commit_tail, cancel it _synchronously
  12101. * - switch over to the vblank wait helper in the core after that since
12102. * we don't need our special handling any more.
  12103. */
  12104. if (!state->legacy_cursor_update)
  12105. intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
  12106. /*
  12107. * Now that the vblank has passed, we can go ahead and program the
  12108. * optimal watermarks on platforms that need two-step watermark
  12109. * programming.
  12110. *
  12111. * TODO: Move this (and other cleanup) to an async worker eventually.
  12112. */
  12113. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12114. intel_cstate = to_intel_crtc_state(crtc->state);
  12115. if (dev_priv->display.optimize_watermarks)
  12116. dev_priv->display.optimize_watermarks(intel_state,
  12117. intel_cstate);
  12118. }
  12119. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12120. intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
  12121. if (put_domains[i])
  12122. modeset_put_power_domains(dev_priv, put_domains[i]);
  12123. intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
  12124. }
  12125. if (intel_state->modeset && intel_can_enable_sagv(state))
  12126. intel_enable_sagv(dev_priv);
  12127. drm_atomic_helper_commit_hw_done(state);
  12128. if (intel_state->modeset)
  12129. intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
  12130. mutex_lock(&dev->struct_mutex);
  12131. drm_atomic_helper_cleanup_planes(dev, state);
  12132. mutex_unlock(&dev->struct_mutex);
  12133. drm_atomic_helper_commit_cleanup_done(state);
  12134. drm_atomic_state_put(state);
  12135. /* As one of the primary mmio accessors, KMS has a high likelihood
  12136. * of triggering bugs in unclaimed access. After we finish
  12137. * modesetting, see if an error has been flagged, and if so
  12138. * enable debugging for the next modeset - and hope we catch
  12139. * the culprit.
  12140. *
  12141. * XXX note that we assume display power is on at this point.
12142. * This might hold true now but we need to add a pm helper to check
  12143. * unclaimed only when the hardware is on, as atomic commits
  12144. * can happen also when the device is completely off.
  12145. */
  12146. intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
  12147. }
  12148. static void intel_atomic_commit_work(struct work_struct *work)
  12149. {
  12150. struct drm_atomic_state *state =
  12151. container_of(work, struct drm_atomic_state, commit_work);
  12152. intel_atomic_commit_tail(state);
  12153. }
  12154. static int __i915_sw_fence_call
  12155. intel_atomic_commit_ready(struct i915_sw_fence *fence,
  12156. enum i915_sw_fence_notify notify)
  12157. {
  12158. struct intel_atomic_state *state =
  12159. container_of(fence, struct intel_atomic_state, commit_ready);
  12160. switch (notify) {
  12161. case FENCE_COMPLETE:
  12162. if (state->base.commit_work.func)
  12163. queue_work(system_unbound_wq, &state->base.commit_work);
  12164. break;
  12165. case FENCE_FREE:
  12166. drm_atomic_state_put(&state->base);
  12167. break;
  12168. }
  12169. return NOTIFY_DONE;
  12170. }
  12171. static void intel_atomic_track_fbs(struct drm_atomic_state *state)
  12172. {
  12173. struct drm_plane_state *old_plane_state;
  12174. struct drm_plane *plane;
  12175. int i;
  12176. for_each_plane_in_state(state, plane, old_plane_state, i)
  12177. i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
  12178. intel_fb_obj(plane->state->fb),
  12179. to_intel_plane(plane)->frontbuffer_bit);
  12180. }
  12181. /**
  12182. * intel_atomic_commit - commit validated state object
  12183. * @dev: DRM device
  12184. * @state: the top-level driver state object
  12185. * @nonblock: nonblocking commit
  12186. *
  12187. * This function commits a top-level state object that has been validated
  12188. * with drm_atomic_helper_check().
  12189. *
  12190. * RETURNS
  12191. * Zero for success or -errno.
  12192. */
  12193. static int intel_atomic_commit(struct drm_device *dev,
  12194. struct drm_atomic_state *state,
  12195. bool nonblock)
  12196. {
  12197. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  12198. struct drm_i915_private *dev_priv = to_i915(dev);
  12199. int ret = 0;
  12200. ret = drm_atomic_helper_setup_commit(state, nonblock);
  12201. if (ret)
  12202. return ret;
  12203. drm_atomic_state_get(state);
  12204. i915_sw_fence_init(&intel_state->commit_ready,
  12205. intel_atomic_commit_ready);
  12206. ret = intel_atomic_prepare_commit(dev, state);
  12207. if (ret) {
  12208. DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
  12209. i915_sw_fence_commit(&intel_state->commit_ready);
  12210. return ret;
  12211. }
  12212. drm_atomic_helper_swap_state(state, true);
  12213. dev_priv->wm.distrust_bios_wm = false;
  12214. intel_shared_dpll_commit(state);
  12215. intel_atomic_track_fbs(state);
  12216. if (intel_state->modeset) {
  12217. memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
  12218. sizeof(intel_state->min_pixclk));
  12219. dev_priv->active_crtcs = intel_state->active_crtcs;
  12220. dev_priv->atomic_cdclk_freq = intel_state->cdclk;
  12221. }
  12222. drm_atomic_state_get(state);
  12223. INIT_WORK(&state->commit_work,
  12224. nonblock ? intel_atomic_commit_work : NULL);
  12225. i915_sw_fence_commit(&intel_state->commit_ready);
  12226. if (!nonblock) {
  12227. i915_sw_fence_wait(&intel_state->commit_ready);
  12228. intel_atomic_commit_tail(state);
  12229. }
  12230. return 0;
  12231. }
  12232. void intel_crtc_restore_mode(struct drm_crtc *crtc)
  12233. {
  12234. struct drm_device *dev = crtc->dev;
  12235. struct drm_atomic_state *state;
  12236. struct drm_crtc_state *crtc_state;
  12237. int ret;
  12238. state = drm_atomic_state_alloc(dev);
  12239. if (!state) {
  12240. DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
  12241. crtc->base.id, crtc->name);
  12242. return;
  12243. }
  12244. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  12245. retry:
  12246. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  12247. ret = PTR_ERR_OR_ZERO(crtc_state);
  12248. if (!ret) {
  12249. if (!crtc_state->active)
  12250. goto out;
  12251. crtc_state->mode_changed = true;
  12252. ret = drm_atomic_commit(state);
  12253. }
  12254. if (ret == -EDEADLK) {
  12255. drm_atomic_state_clear(state);
  12256. drm_modeset_backoff(state->acquire_ctx);
  12257. goto retry;
  12258. }
  12259. out:
  12260. drm_atomic_state_put(state);
  12261. }
  12262. /*
  12263. * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  12264. * drm_atomic_helper_legacy_gamma_set() directly.
  12265. */
  12266. static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
  12267. u16 *red, u16 *green, u16 *blue,
  12268. uint32_t size)
  12269. {
  12270. struct drm_device *dev = crtc->dev;
  12271. struct drm_mode_config *config = &dev->mode_config;
  12272. struct drm_crtc_state *state;
  12273. int ret;
  12274. ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
  12275. if (ret)
  12276. return ret;
  12277. /*
  12278. * Make sure we update the legacy properties so this works when
  12279. * atomic is not enabled.
  12280. */
  12281. state = crtc->state;
  12282. drm_object_property_set_value(&crtc->base,
  12283. config->degamma_lut_property,
  12284. (state->degamma_lut) ?
  12285. state->degamma_lut->base.id : 0);
  12286. drm_object_property_set_value(&crtc->base,
  12287. config->ctm_property,
  12288. (state->ctm) ?
  12289. state->ctm->base.id : 0);
  12290. drm_object_property_set_value(&crtc->base,
  12291. config->gamma_lut_property,
  12292. (state->gamma_lut) ?
  12293. state->gamma_lut->base.id : 0);
  12294. return 0;
  12295. }
  12296. static const struct drm_crtc_funcs intel_crtc_funcs = {
  12297. .gamma_set = intel_atomic_legacy_gamma_set,
  12298. .set_config = drm_atomic_helper_set_config,
  12299. .set_property = drm_atomic_helper_crtc_set_property,
  12300. .destroy = intel_crtc_destroy,
  12301. .page_flip = intel_crtc_page_flip,
  12302. .atomic_duplicate_state = intel_crtc_duplicate_state,
  12303. .atomic_destroy_state = intel_crtc_destroy_state,
  12304. };
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state,
							   plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	if (!new_state->fence) { /* implicit fencing */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	}

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev_priv)->cursor_needs_physical) {
		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

		ret = i915_gem_object_attach_phys(obj, align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			return ret;
		}
	} else {
		struct i915_vma *vma;

		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
		if (IS_ERR(vma)) {
			DRM_DEBUG_KMS("failed to pin object\n");
			return PTR_ERR(vma);
		}
	}

	return 0;
}
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the plane state being removed from the plane
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
}
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 * close to 3 but not 3, -1 is for that purpose
	 * or
	 * cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}
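/*
 * Worked example for skl_max_scale() above (illustrative numbers, not read
 * from hardware): with cdclk = 540000 kHz and crtc_clock = 337500 kHz the
 * clock-ratio term is (1 << 8) * ((540000 << 8) / 337500) = 256 * 409 =
 * 104704 (~1.6 in 16.16 fixed point), which is below the
 * (1 << 16) * 3 - 1 = 196607 cap, so max_scale is limited by the
 * cdclk/crtc_clock ratio rather than the fixed "close to 3" bound.
 */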
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   min_scale, max_scale,
					   can_position, true);
	if (ret)
		return ret;

	if (!state->base.fb)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(state);
		if (ret)
			return ret;
	}

	return 0;
}
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (crtc->state->color_mgmt_changed ||
	    to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}

static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};
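/*
 * Create and register the primary plane for @pipe: pick the per-generation
 * pixel format list and update/disable hooks, register the plane with DRM,
 * and expose the rotation property supported on this platform.
 */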
  12529. static struct intel_plane *
  12530. intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  12531. {
  12532. struct intel_plane *primary = NULL;
  12533. struct intel_plane_state *state = NULL;
  12534. const uint32_t *intel_primary_formats;
  12535. unsigned int supported_rotations;
  12536. unsigned int num_formats;
  12537. int ret;
  12538. primary = kzalloc(sizeof(*primary), GFP_KERNEL);
  12539. if (!primary) {
  12540. ret = -ENOMEM;
  12541. goto fail;
  12542. }
  12543. state = intel_create_plane_state(&primary->base);
  12544. if (!state) {
  12545. ret = -ENOMEM;
  12546. goto fail;
  12547. }
  12548. primary->base.state = &state->base;
  12549. primary->can_scale = false;
  12550. primary->max_downscale = 1;
  12551. if (INTEL_GEN(dev_priv) >= 9) {
  12552. primary->can_scale = true;
  12553. state->scaler_id = -1;
  12554. }
  12555. primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port are hooked up to pipe B. Hence we want plane A feeding pipe B.
	 */
  12560. if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
  12561. primary->plane = (enum plane) !pipe;
  12562. else
  12563. primary->plane = (enum plane) pipe;
  12564. primary->id = PLANE_PRIMARY;
  12565. primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
  12566. primary->check_plane = intel_check_primary_plane;
  12567. if (INTEL_GEN(dev_priv) >= 9) {
  12568. intel_primary_formats = skl_primary_formats;
  12569. num_formats = ARRAY_SIZE(skl_primary_formats);
  12570. primary->update_plane = skylake_update_primary_plane;
  12571. primary->disable_plane = skylake_disable_primary_plane;
  12572. } else if (HAS_PCH_SPLIT(dev_priv)) {
  12573. intel_primary_formats = i965_primary_formats;
  12574. num_formats = ARRAY_SIZE(i965_primary_formats);
  12575. primary->update_plane = ironlake_update_primary_plane;
  12576. primary->disable_plane = i9xx_disable_primary_plane;
  12577. } else if (INTEL_GEN(dev_priv) >= 4) {
  12578. intel_primary_formats = i965_primary_formats;
  12579. num_formats = ARRAY_SIZE(i965_primary_formats);
  12580. primary->update_plane = i9xx_update_primary_plane;
  12581. primary->disable_plane = i9xx_disable_primary_plane;
  12582. } else {
  12583. intel_primary_formats = i8xx_primary_formats;
  12584. num_formats = ARRAY_SIZE(i8xx_primary_formats);
  12585. primary->update_plane = i9xx_update_primary_plane;
  12586. primary->disable_plane = i9xx_disable_primary_plane;
  12587. }
  12588. if (INTEL_GEN(dev_priv) >= 9)
  12589. ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
  12590. 0, &intel_plane_funcs,
  12591. intel_primary_formats, num_formats,
  12592. DRM_PLANE_TYPE_PRIMARY,
  12593. "plane 1%c", pipe_name(pipe));
  12594. else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
  12595. ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
  12596. 0, &intel_plane_funcs,
  12597. intel_primary_formats, num_formats,
  12598. DRM_PLANE_TYPE_PRIMARY,
  12599. "primary %c", pipe_name(pipe));
  12600. else
  12601. ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
  12602. 0, &intel_plane_funcs,
  12603. intel_primary_formats, num_formats,
  12604. DRM_PLANE_TYPE_PRIMARY,
  12605. "plane %c", plane_name(primary->plane));
  12606. if (ret)
  12607. goto fail;
  12608. if (INTEL_GEN(dev_priv) >= 9) {
  12609. supported_rotations =
  12610. DRM_ROTATE_0 | DRM_ROTATE_90 |
  12611. DRM_ROTATE_180 | DRM_ROTATE_270;
  12612. } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
  12613. supported_rotations =
  12614. DRM_ROTATE_0 | DRM_ROTATE_180 |
  12615. DRM_REFLECT_X;
  12616. } else if (INTEL_GEN(dev_priv) >= 4) {
  12617. supported_rotations =
  12618. DRM_ROTATE_0 | DRM_ROTATE_180;
  12619. } else {
  12620. supported_rotations = DRM_ROTATE_0;
  12621. }
  12622. if (INTEL_GEN(dev_priv) >= 4)
  12623. drm_plane_create_rotation_property(&primary->base,
  12624. DRM_ROTATE_0,
  12625. supported_rotations);
  12626. drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
  12627. return primary;
  12628. fail:
  12629. kfree(state);
  12630. kfree(primary);
  12631. return ERR_PTR(ret);
  12632. }
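/*
 * Validate a cursor plane update: position/clipping via the plane helper
 * (no scaling allowed), supported cursor dimensions, buffer size against
 * the required stride, linear (untiled) layout, and the CHV pipe C
 * left-edge restriction handled below.
 */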
  12633. static int
  12634. intel_check_cursor_plane(struct drm_plane *plane,
  12635. struct intel_crtc_state *crtc_state,
  12636. struct intel_plane_state *state)
  12637. {
  12638. struct drm_framebuffer *fb = state->base.fb;
  12639. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  12640. enum pipe pipe = to_intel_plane(plane)->pipe;
  12641. unsigned stride;
  12642. int ret;
  12643. ret = drm_plane_helper_check_state(&state->base,
  12644. &state->clip,
  12645. DRM_PLANE_HELPER_NO_SCALING,
  12646. DRM_PLANE_HELPER_NO_SCALING,
  12647. true, true);
  12648. if (ret)
  12649. return ret;
  12650. /* if we want to turn off the cursor ignore width and height */
  12651. if (!obj)
  12652. return 0;
  12653. /* Check for which cursor types we support */
  12654. if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
  12655. state->base.crtc_h)) {
  12656. DRM_DEBUG("Cursor dimension %dx%d not supported\n",
  12657. state->base.crtc_w, state->base.crtc_h);
  12658. return -EINVAL;
  12659. }
  12660. stride = roundup_pow_of_two(state->base.crtc_w) * 4;
  12661. if (obj->base.size < stride * state->base.crtc_h) {
  12662. DRM_DEBUG_KMS("buffer is too small\n");
  12663. return -ENOMEM;
  12664. }
  12665. if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
  12666. DRM_DEBUG_KMS("cursor cannot be tiled\n");
  12667. return -EINVAL;
  12668. }
	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
  12679. if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
  12680. state->base.visible && state->base.crtc_x < 0) {
  12681. DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
  12682. return -EINVAL;
  12683. }
  12684. return 0;
  12685. }
  12686. static void
  12687. intel_disable_cursor_plane(struct drm_plane *plane,
  12688. struct drm_crtc *crtc)
  12689. {
  12690. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  12691. intel_crtc->cursor_addr = 0;
  12692. intel_crtc_update_cursor(crtc, NULL);
  12693. }
  12694. static void
  12695. intel_update_cursor_plane(struct drm_plane *plane,
  12696. const struct intel_crtc_state *crtc_state,
  12697. const struct intel_plane_state *state)
  12698. {
  12699. struct drm_crtc *crtc = crtc_state->base.crtc;
  12700. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  12701. struct drm_i915_private *dev_priv = to_i915(plane->dev);
  12702. struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
  12703. uint32_t addr;
  12704. if (!obj)
  12705. addr = 0;
  12706. else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
  12707. addr = i915_gem_object_ggtt_offset(obj, NULL);
  12708. else
  12709. addr = obj->phys_handle->busaddr;
  12710. intel_crtc->cursor_addr = addr;
  12711. intel_crtc_update_cursor(crtc, state);
  12712. }
  12713. static struct intel_plane *
  12714. intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  12715. {
  12716. struct intel_plane *cursor = NULL;
  12717. struct intel_plane_state *state = NULL;
  12718. int ret;
  12719. cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
  12720. if (!cursor) {
  12721. ret = -ENOMEM;
  12722. goto fail;
  12723. }
  12724. state = intel_create_plane_state(&cursor->base);
  12725. if (!state) {
  12726. ret = -ENOMEM;
  12727. goto fail;
  12728. }
  12729. cursor->base.state = &state->base;
  12730. cursor->can_scale = false;
  12731. cursor->max_downscale = 1;
  12732. cursor->pipe = pipe;
  12733. cursor->plane = pipe;
  12734. cursor->id = PLANE_CURSOR;
  12735. cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
  12736. cursor->check_plane = intel_check_cursor_plane;
  12737. cursor->update_plane = intel_update_cursor_plane;
  12738. cursor->disable_plane = intel_disable_cursor_plane;
  12739. ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
  12740. 0, &intel_plane_funcs,
  12741. intel_cursor_formats,
  12742. ARRAY_SIZE(intel_cursor_formats),
  12743. DRM_PLANE_TYPE_CURSOR,
  12744. "cursor %c", pipe_name(pipe));
  12745. if (ret)
  12746. goto fail;
  12747. if (INTEL_GEN(dev_priv) >= 4)
  12748. drm_plane_create_rotation_property(&cursor->base,
  12749. DRM_ROTATE_0,
  12750. DRM_ROTATE_0 |
  12751. DRM_ROTATE_180);
  12752. if (INTEL_GEN(dev_priv) >= 9)
  12753. state->scaler_id = -1;
  12754. drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
  12755. return cursor;
  12756. fail:
  12757. kfree(state);
  12758. kfree(cursor);
  12759. return ERR_PTR(ret);
  12760. }
static void skl_init_scalers(struct drm_i915_private *dev_priv,
			     struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	for (i = 0; i < crtc->num_scalers; i++) {
		struct intel_scaler *scaler = &scaler_state->scalers[i];

		scaler->in_use = 0;
		scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}
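/*
 * intel_crtc_init - allocate and register the CRTC for @pipe: set up its
 * software state, create the primary, sprite and cursor planes, hook the
 * CRTC into DRM, and initialise the plane/pipe mappings and color
 * management state.
 */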
  12775. static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
  12776. {
  12777. struct intel_crtc *intel_crtc;
  12778. struct intel_crtc_state *crtc_state = NULL;
  12779. struct intel_plane *primary = NULL;
  12780. struct intel_plane *cursor = NULL;
  12781. int sprite, ret;
  12782. intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
  12783. if (!intel_crtc)
  12784. return -ENOMEM;
  12785. crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
  12786. if (!crtc_state) {
  12787. ret = -ENOMEM;
  12788. goto fail;
  12789. }
  12790. intel_crtc->config = crtc_state;
  12791. intel_crtc->base.state = &crtc_state->base;
  12792. crtc_state->base.crtc = &intel_crtc->base;
  12793. /* initialize shared scalers */
  12794. if (INTEL_GEN(dev_priv) >= 9) {
  12795. if (pipe == PIPE_C)
  12796. intel_crtc->num_scalers = 1;
  12797. else
  12798. intel_crtc->num_scalers = SKL_NUM_SCALERS;
  12799. skl_init_scalers(dev_priv, intel_crtc, crtc_state);
  12800. }
  12801. primary = intel_primary_plane_create(dev_priv, pipe);
  12802. if (IS_ERR(primary)) {
  12803. ret = PTR_ERR(primary);
  12804. goto fail;
  12805. }
  12806. intel_crtc->plane_ids_mask |= BIT(primary->id);
  12807. for_each_sprite(dev_priv, pipe, sprite) {
  12808. struct intel_plane *plane;
  12809. plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
  12810. if (IS_ERR(plane)) {
  12811. ret = PTR_ERR(plane);
  12812. goto fail;
  12813. }
  12814. intel_crtc->plane_ids_mask |= BIT(plane->id);
  12815. }
  12816. cursor = intel_cursor_plane_create(dev_priv, pipe);
  12817. if (IS_ERR(cursor)) {
  12818. ret = PTR_ERR(cursor);
  12819. goto fail;
  12820. }
  12821. intel_crtc->plane_ids_mask |= BIT(cursor->id);
  12822. ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
  12823. &primary->base, &cursor->base,
  12824. &intel_crtc_funcs,
  12825. "pipe %c", pipe_name(pipe));
  12826. if (ret)
  12827. goto fail;
  12828. intel_crtc->pipe = pipe;
  12829. intel_crtc->plane = primary->plane;
  12830. intel_crtc->cursor_base = ~0;
  12831. intel_crtc->cursor_cntl = ~0;
  12832. intel_crtc->cursor_size = ~0;
  12833. intel_crtc->wm.cxsr_allowed = true;
  12834. BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
  12835. dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
  12836. dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
  12837. dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
  12838. drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
  12839. intel_color_init(&intel_crtc->base);
  12840. WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
  12841. return 0;
  12842. fail:
  12843. /*
  12844. * drm_mode_config_cleanup() will free up any
  12845. * crtcs/planes already initialized.
  12846. */
  12847. kfree(crtc_state);
  12848. kfree(intel_crtc);
  12849. return ret;
  12850. }
  12851. enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
  12852. {
  12853. struct drm_encoder *encoder = connector->base.encoder;
  12854. struct drm_device *dev = connector->base.dev;
  12855. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  12856. if (!encoder || WARN_ON(!encoder->crtc))
  12857. return INVALID_PIPE;
  12858. return to_intel_crtc(encoder->crtc)->pipe;
  12859. }
  12860. int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
  12861. struct drm_file *file)
  12862. {
  12863. struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
  12864. struct drm_crtc *drmmode_crtc;
  12865. struct intel_crtc *crtc;
  12866. drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
  12867. if (!drmmode_crtc)
  12868. return -ENOENT;
  12869. crtc = to_intel_crtc(drmmode_crtc);
  12870. pipe_from_crtc_id->pipe = crtc->pipe;
  12871. return 0;
  12872. }
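/*
 * Build a bitmask of encoder indices that can be cloned with @encoder;
 * used below to fill in possible_clones for each registered encoder.
 */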
  12873. static int intel_encoder_clones(struct intel_encoder *encoder)
  12874. {
  12875. struct drm_device *dev = encoder->base.dev;
  12876. struct intel_encoder *source_encoder;
  12877. int index_mask = 0;
  12878. int entry = 0;
  12879. for_each_intel_encoder(dev, source_encoder) {
  12880. if (encoders_cloneable(encoder, source_encoder))
  12881. index_mask |= (1 << entry);
  12882. entry++;
  12883. }
  12884. return index_mask;
  12885. }
  12886. static bool has_edp_a(struct drm_i915_private *dev_priv)
  12887. {
  12888. if (!IS_MOBILE(dev_priv))
  12889. return false;
  12890. if ((I915_READ(DP_A) & DP_DETECTED) == 0)
  12891. return false;
  12892. if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
  12893. return false;
  12894. return true;
  12895. }
  12896. static bool intel_crt_present(struct drm_i915_private *dev_priv)
  12897. {
  12898. if (INTEL_GEN(dev_priv) >= 9)
  12899. return false;
  12900. if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
  12901. return false;
  12902. if (IS_CHERRYVIEW(dev_priv))
  12903. return false;
  12904. if (HAS_PCH_LPT_H(dev_priv) &&
  12905. I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
  12906. return false;
  12907. /* DDI E can't be used if DDI A requires 4 lanes */
  12908. if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
  12909. return false;
  12910. if (!dev_priv->vbt.int_crt_support)
  12911. return false;
  12912. return true;
  12913. }
  12914. void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
  12915. {
  12916. int pps_num;
  12917. int pps_idx;
  12918. if (HAS_DDI(dev_priv))
  12919. return;
  12920. /*
  12921. * This w/a is needed at least on CPT/PPT, but to be sure apply it
  12922. * everywhere where registers can be write protected.
  12923. */
  12924. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  12925. pps_num = 2;
  12926. else
  12927. pps_num = 1;
  12928. for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
  12929. u32 val = I915_READ(PP_CONTROL(pps_idx));
  12930. val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
  12931. I915_WRITE(PP_CONTROL(pps_idx), val);
  12932. }
  12933. }
  12934. static void intel_pps_init(struct drm_i915_private *dev_priv)
  12935. {
  12936. if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
  12937. dev_priv->pps_mmio_base = PCH_PPS_BASE;
  12938. else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  12939. dev_priv->pps_mmio_base = VLV_PPS_BASE;
  12940. else
  12941. dev_priv->pps_mmio_base = PPS_BASE;
  12942. intel_pps_unlock_regs_wa(dev_priv);
  12943. }
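/*
 * intel_setup_outputs - probe and register the display outputs (LVDS, CRT,
 * DDI, DP, HDMI, SDVO, DSI, DVO, TV) present on this platform, based on the
 * hardware generation, detection straps and VBT.
 */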
  12944. static void intel_setup_outputs(struct drm_i915_private *dev_priv)
  12945. {
  12946. struct intel_encoder *encoder;
  12947. bool dpd_is_edp = false;
  12948. intel_pps_init(dev_priv);
	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
  12954. intel_lvds_init(dev_priv);
  12955. if (intel_crt_present(dev_priv))
  12956. intel_crt_init(dev_priv);
  12957. if (IS_BROXTON(dev_priv)) {
  12958. /*
  12959. * FIXME: Broxton doesn't support port detection via the
  12960. * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
  12961. * detect the ports.
  12962. */
  12963. intel_ddi_init(dev_priv, PORT_A);
  12964. intel_ddi_init(dev_priv, PORT_B);
  12965. intel_ddi_init(dev_priv, PORT_C);
  12966. intel_dsi_init(dev_priv);
  12967. } else if (HAS_DDI(dev_priv)) {
  12968. int found;
  12969. /*
  12970. * Haswell uses DDI functions to detect digital outputs.
  12971. * On SKL pre-D0 the strap isn't connected, so we assume
  12972. * it's there.
  12973. */
  12974. found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
  12975. /* WaIgnoreDDIAStrap: skl */
  12976. if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
  12977. intel_ddi_init(dev_priv, PORT_A);
  12978. /* DDI B, C and D detection is indicated by the SFUSE_STRAP
  12979. * register */
  12980. found = I915_READ(SFUSE_STRAP);
  12981. if (found & SFUSE_STRAP_DDIB_DETECTED)
  12982. intel_ddi_init(dev_priv, PORT_B);
  12983. if (found & SFUSE_STRAP_DDIC_DETECTED)
  12984. intel_ddi_init(dev_priv, PORT_C);
  12985. if (found & SFUSE_STRAP_DDID_DETECTED)
  12986. intel_ddi_init(dev_priv, PORT_D);
  12987. /*
  12988. * On SKL we don't have a way to detect DDI-E so we rely on VBT.
  12989. */
  12990. if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
  12991. (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
  12992. dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
  12993. dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
  12994. intel_ddi_init(dev_priv, PORT_E);
  12995. } else if (HAS_PCH_SPLIT(dev_priv)) {
  12996. int found;
  12997. dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
  12998. if (has_edp_a(dev_priv))
  12999. intel_dp_init(dev_priv, DP_A, PORT_A);
  13000. if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
  13001. /* PCH SDVOB multiplex with HDMIB */
  13002. found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
  13003. if (!found)
  13004. intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
  13005. if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
  13006. intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
  13007. }
  13008. if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
  13009. intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
  13010. if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
  13011. intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
  13012. if (I915_READ(PCH_DP_C) & DP_DETECTED)
  13013. intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
  13014. if (I915_READ(PCH_DP_D) & DP_DETECTED)
  13015. intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
  13016. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  13017. bool has_edp, has_port;
  13018. /*
  13019. * The DP_DETECTED bit is the latched state of the DDC
  13020. * SDA pin at boot. However since eDP doesn't require DDC
  13021. * (no way to plug in a DP->HDMI dongle) the DDC pins for
  13022. * eDP ports may have been muxed to an alternate function.
  13023. * Thus we can't rely on the DP_DETECTED bit alone to detect
  13024. * eDP ports. Consult the VBT as well as DP_DETECTED to
  13025. * detect eDP ports.
  13026. *
  13027. * Sadly the straps seem to be missing sometimes even for HDMI
  13028. * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
  13029. * and VBT for the presence of the port. Additionally we can't
  13030. * trust the port type the VBT declares as we've seen at least
  13031. * HDMI ports that the VBT claim are DP or eDP.
  13032. */
  13033. has_edp = intel_dp_is_edp(dev_priv, PORT_B);
  13034. has_port = intel_bios_is_port_present(dev_priv, PORT_B);
  13035. if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
  13036. has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
  13037. if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
  13038. intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
  13039. has_edp = intel_dp_is_edp(dev_priv, PORT_C);
  13040. has_port = intel_bios_is_port_present(dev_priv, PORT_C);
  13041. if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
  13042. has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
  13043. if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
  13044. intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
  13045. if (IS_CHERRYVIEW(dev_priv)) {
  13046. /*
  13047. * eDP not supported on port D,
  13048. * so no need to worry about it
  13049. */
  13050. has_port = intel_bios_is_port_present(dev_priv, PORT_D);
  13051. if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
  13052. intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
  13053. if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
  13054. intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
  13055. }
  13056. intel_dsi_init(dev_priv);
  13057. } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
  13058. bool found = false;
  13059. if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  13060. DRM_DEBUG_KMS("probing SDVOB\n");
  13061. found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
  13062. if (!found && IS_G4X(dev_priv)) {
  13063. DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
  13064. intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
  13065. }
  13066. if (!found && IS_G4X(dev_priv))
  13067. intel_dp_init(dev_priv, DP_B, PORT_B);
  13068. }
  13069. /* Before G4X SDVOC doesn't have its own detect register */
  13070. if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  13071. DRM_DEBUG_KMS("probing SDVOC\n");
  13072. found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
  13073. }
  13074. if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
  13075. if (IS_G4X(dev_priv)) {
  13076. DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
  13077. intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
  13078. }
  13079. if (IS_G4X(dev_priv))
  13080. intel_dp_init(dev_priv, DP_C, PORT_C);
  13081. }
  13082. if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
  13083. intel_dp_init(dev_priv, DP_D, PORT_D);
  13084. } else if (IS_GEN2(dev_priv))
  13085. intel_dvo_init(dev_priv);
  13086. if (SUPPORTS_TV(dev_priv))
  13087. intel_tv_init(dev_priv);
  13088. intel_psr_init(dev_priv);
  13089. for_each_intel_encoder(&dev_priv->drm, encoder) {
  13090. encoder->base.possible_crtcs = encoder->crtc_mask;
  13091. encoder->base.possible_clones =
  13092. intel_encoder_clones(encoder);
  13093. }
  13094. intel_init_pch_refclk(dev_priv);
  13095. drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
  13096. }
  13097. static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  13098. {
  13099. struct drm_device *dev = fb->dev;
  13100. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  13101. drm_framebuffer_cleanup(fb);
  13102. mutex_lock(&dev->struct_mutex);
  13103. WARN_ON(!intel_fb->obj->framebuffer_references--);
  13104. i915_gem_object_put(intel_fb->obj);
  13105. mutex_unlock(&dev->struct_mutex);
  13106. kfree(intel_fb);
  13107. }
  13108. static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
  13109. struct drm_file *file,
  13110. unsigned int *handle)
  13111. {
  13112. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  13113. struct drm_i915_gem_object *obj = intel_fb->obj;
  13114. if (obj->userptr.mm) {
  13115. DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
  13116. return -EINVAL;
  13117. }
  13118. return drm_gem_handle_create(file, &obj->base, handle);
  13119. }
  13120. static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
  13121. struct drm_file *file,
  13122. unsigned flags, unsigned color,
  13123. struct drm_clip_rect *clips,
  13124. unsigned num_clips)
  13125. {
  13126. struct drm_device *dev = fb->dev;
  13127. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  13128. struct drm_i915_gem_object *obj = intel_fb->obj;
  13129. mutex_lock(&dev->struct_mutex);
  13130. if (obj->pin_display && obj->cache_dirty)
  13131. i915_gem_clflush_object(obj, true);
  13132. intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
  13133. mutex_unlock(&dev->struct_mutex);
  13134. return 0;
  13135. }
  13136. static const struct drm_framebuffer_funcs intel_fb_funcs = {
  13137. .destroy = intel_user_framebuffer_destroy,
  13138. .create_handle = intel_user_framebuffer_create_handle,
  13139. .dirty = intel_user_framebuffer_dirty,
  13140. };
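/*
 * Maximum framebuffer stride in bytes accepted for the given modifier and
 * pixel format on this platform; enforced in intel_framebuffer_init() below.
 */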
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
			 uint64_t fb_modifier, uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev_priv)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
		   !IS_CHERRYVIEW(dev_priv)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}
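/*
 * Validate a userspace framebuffer request against hardware limits: the
 * modifier must match the object's tiling, the pitch must meet the required
 * alignment and stay within the platform pitch limit, and the pixel format
 * must be supported by at least one plane, before the GEM object is wrapped
 * in a drm_framebuffer.
 */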
  13170. static int intel_framebuffer_init(struct drm_device *dev,
  13171. struct intel_framebuffer *intel_fb,
  13172. struct drm_mode_fb_cmd2 *mode_cmd,
  13173. struct drm_i915_gem_object *obj)
  13174. {
  13175. struct drm_i915_private *dev_priv = to_i915(dev);
  13176. unsigned int tiling = i915_gem_object_get_tiling(obj);
  13177. int ret;
  13178. u32 pitch_limit, stride_alignment;
  13179. struct drm_format_name_buf format_name;
  13180. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  13181. if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
  13182. /*
  13183. * If there's a fence, enforce that
  13184. * the fb modifier and tiling mode match.
  13185. */
  13186. if (tiling != I915_TILING_NONE &&
  13187. tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
  13188. DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
  13189. return -EINVAL;
  13190. }
  13191. } else {
  13192. if (tiling == I915_TILING_X) {
  13193. mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
  13194. } else if (tiling == I915_TILING_Y) {
  13195. DRM_DEBUG("No Y tiling for legacy addfb\n");
  13196. return -EINVAL;
  13197. }
  13198. }
  13199. /* Passed in modifier sanity checking. */
  13200. switch (mode_cmd->modifier[0]) {
  13201. case I915_FORMAT_MOD_Y_TILED:
  13202. case I915_FORMAT_MOD_Yf_TILED:
  13203. if (INTEL_GEN(dev_priv) < 9) {
  13204. DRM_DEBUG("Unsupported tiling 0x%llx!\n",
  13205. mode_cmd->modifier[0]);
  13206. return -EINVAL;
  13207. }
  13208. case DRM_FORMAT_MOD_NONE:
  13209. case I915_FORMAT_MOD_X_TILED:
  13210. break;
  13211. default:
  13212. DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
  13213. mode_cmd->modifier[0]);
  13214. return -EINVAL;
  13215. }
  13216. /*
  13217. * gen2/3 display engine uses the fence if present,
  13218. * so the tiling mode must match the fb modifier exactly.
  13219. */
  13220. if (INTEL_INFO(dev_priv)->gen < 4 &&
  13221. tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
  13222. DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
  13223. return -EINVAL;
  13224. }
  13225. stride_alignment = intel_fb_stride_alignment(dev_priv,
  13226. mode_cmd->modifier[0],
  13227. mode_cmd->pixel_format);
  13228. if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
  13229. DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
  13230. mode_cmd->pitches[0], stride_alignment);
  13231. return -EINVAL;
  13232. }
  13233. pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
  13234. mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must not exceed %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}
  13242. /*
  13243. * If there's a fence, enforce that
  13244. * the fb pitch and fence stride match.
  13245. */
  13246. if (tiling != I915_TILING_NONE &&
  13247. mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
  13248. DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
  13249. mode_cmd->pitches[0],
  13250. i915_gem_object_get_stride(obj));
  13251. return -EINVAL;
  13252. }
  13253. /* Reject formats not supported by any plane early. */
  13254. switch (mode_cmd->pixel_format) {
  13255. case DRM_FORMAT_C8:
  13256. case DRM_FORMAT_RGB565:
  13257. case DRM_FORMAT_XRGB8888:
  13258. case DRM_FORMAT_ARGB8888:
  13259. break;
  13260. case DRM_FORMAT_XRGB1555:
  13261. if (INTEL_GEN(dev_priv) > 3) {
  13262. DRM_DEBUG("unsupported pixel format: %s\n",
  13263. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13264. return -EINVAL;
  13265. }
  13266. break;
  13267. case DRM_FORMAT_ABGR8888:
  13268. if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
  13269. INTEL_GEN(dev_priv) < 9) {
  13270. DRM_DEBUG("unsupported pixel format: %s\n",
  13271. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13272. return -EINVAL;
  13273. }
  13274. break;
  13275. case DRM_FORMAT_XBGR8888:
  13276. case DRM_FORMAT_XRGB2101010:
  13277. case DRM_FORMAT_XBGR2101010:
  13278. if (INTEL_GEN(dev_priv) < 4) {
  13279. DRM_DEBUG("unsupported pixel format: %s\n",
  13280. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13281. return -EINVAL;
  13282. }
  13283. break;
  13284. case DRM_FORMAT_ABGR2101010:
  13285. if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
  13286. DRM_DEBUG("unsupported pixel format: %s\n",
  13287. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13288. return -EINVAL;
  13289. }
  13290. break;
  13291. case DRM_FORMAT_YUYV:
  13292. case DRM_FORMAT_UYVY:
  13293. case DRM_FORMAT_YVYU:
  13294. case DRM_FORMAT_VYUY:
  13295. if (INTEL_GEN(dev_priv) < 5) {
  13296. DRM_DEBUG("unsupported pixel format: %s\n",
  13297. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13298. return -EINVAL;
  13299. }
  13300. break;
  13301. default:
  13302. DRM_DEBUG("unsupported pixel format: %s\n",
  13303. drm_get_format_name(mode_cmd->pixel_format, &format_name));
  13304. return -EINVAL;
  13305. }
  13306. /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
  13307. if (mode_cmd->offsets[0] != 0)
  13308. return -EINVAL;
  13309. drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
  13310. intel_fb->obj = obj;
  13311. ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
  13312. if (ret)
  13313. return ret;
  13314. ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
  13315. if (ret) {
  13316. DRM_ERROR("framebuffer init failed %d\n", ret);
  13317. return ret;
  13318. }
  13319. intel_fb->obj->framebuffer_references++;
  13320. return 0;
  13321. }
  13322. static struct drm_framebuffer *
  13323. intel_user_framebuffer_create(struct drm_device *dev,
  13324. struct drm_file *filp,
  13325. const struct drm_mode_fb_cmd2 *user_mode_cmd)
  13326. {
  13327. struct drm_framebuffer *fb;
  13328. struct drm_i915_gem_object *obj;
  13329. struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
  13330. obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
  13331. if (!obj)
  13332. return ERR_PTR(-ENOENT);
  13333. fb = intel_framebuffer_create(dev, &mode_cmd, obj);
  13334. if (IS_ERR(fb))
  13335. i915_gem_object_put(obj);
  13336. return fb;
  13337. }
  13338. static const struct drm_mode_config_funcs intel_mode_funcs = {
  13339. .fb_create = intel_user_framebuffer_create,
  13340. .output_poll_changed = intel_fbdev_output_poll_changed,
  13341. .atomic_check = intel_atomic_check,
  13342. .atomic_commit = intel_atomic_commit,
  13343. .atomic_state_alloc = intel_atomic_state_alloc,
  13344. .atomic_state_clear = intel_atomic_state_clear,
  13345. };
  13346. /**
  13347. * intel_init_display_hooks - initialize the display modesetting hooks
  13348. * @dev_priv: device private
  13349. */
  13350. void intel_init_display_hooks(struct drm_i915_private *dev_priv)
  13351. {
  13352. if (INTEL_INFO(dev_priv)->gen >= 9) {
  13353. dev_priv->display.get_pipe_config = haswell_get_pipe_config;
  13354. dev_priv->display.get_initial_plane_config =
  13355. skylake_get_initial_plane_config;
  13356. dev_priv->display.crtc_compute_clock =
  13357. haswell_crtc_compute_clock;
  13358. dev_priv->display.crtc_enable = haswell_crtc_enable;
  13359. dev_priv->display.crtc_disable = haswell_crtc_disable;
  13360. } else if (HAS_DDI(dev_priv)) {
  13361. dev_priv->display.get_pipe_config = haswell_get_pipe_config;
  13362. dev_priv->display.get_initial_plane_config =
  13363. ironlake_get_initial_plane_config;
  13364. dev_priv->display.crtc_compute_clock =
  13365. haswell_crtc_compute_clock;
  13366. dev_priv->display.crtc_enable = haswell_crtc_enable;
  13367. dev_priv->display.crtc_disable = haswell_crtc_disable;
  13368. } else if (HAS_PCH_SPLIT(dev_priv)) {
  13369. dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
  13370. dev_priv->display.get_initial_plane_config =
  13371. ironlake_get_initial_plane_config;
  13372. dev_priv->display.crtc_compute_clock =
  13373. ironlake_crtc_compute_clock;
  13374. dev_priv->display.crtc_enable = ironlake_crtc_enable;
  13375. dev_priv->display.crtc_disable = ironlake_crtc_disable;
  13376. } else if (IS_CHERRYVIEW(dev_priv)) {
  13377. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13378. dev_priv->display.get_initial_plane_config =
  13379. i9xx_get_initial_plane_config;
  13380. dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
  13381. dev_priv->display.crtc_enable = valleyview_crtc_enable;
  13382. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13383. } else if (IS_VALLEYVIEW(dev_priv)) {
  13384. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13385. dev_priv->display.get_initial_plane_config =
  13386. i9xx_get_initial_plane_config;
  13387. dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
  13388. dev_priv->display.crtc_enable = valleyview_crtc_enable;
  13389. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13390. } else if (IS_G4X(dev_priv)) {
  13391. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13392. dev_priv->display.get_initial_plane_config =
  13393. i9xx_get_initial_plane_config;
  13394. dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
  13395. dev_priv->display.crtc_enable = i9xx_crtc_enable;
  13396. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13397. } else if (IS_PINEVIEW(dev_priv)) {
  13398. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13399. dev_priv->display.get_initial_plane_config =
  13400. i9xx_get_initial_plane_config;
  13401. dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
  13402. dev_priv->display.crtc_enable = i9xx_crtc_enable;
  13403. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13404. } else if (!IS_GEN2(dev_priv)) {
  13405. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13406. dev_priv->display.get_initial_plane_config =
  13407. i9xx_get_initial_plane_config;
  13408. dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
  13409. dev_priv->display.crtc_enable = i9xx_crtc_enable;
  13410. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13411. } else {
  13412. dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
  13413. dev_priv->display.get_initial_plane_config =
  13414. i9xx_get_initial_plane_config;
  13415. dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
  13416. dev_priv->display.crtc_enable = i9xx_crtc_enable;
  13417. dev_priv->display.crtc_disable = i9xx_crtc_disable;
  13418. }
  13419. /* Returns the core display clock speed */
  13420. if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
  13421. dev_priv->display.get_display_clock_speed =
  13422. skylake_get_display_clock_speed;
  13423. else if (IS_BROXTON(dev_priv))
  13424. dev_priv->display.get_display_clock_speed =
  13425. broxton_get_display_clock_speed;
  13426. else if (IS_BROADWELL(dev_priv))
  13427. dev_priv->display.get_display_clock_speed =
  13428. broadwell_get_display_clock_speed;
  13429. else if (IS_HASWELL(dev_priv))
  13430. dev_priv->display.get_display_clock_speed =
  13431. haswell_get_display_clock_speed;
  13432. else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  13433. dev_priv->display.get_display_clock_speed =
  13434. valleyview_get_display_clock_speed;
  13435. else if (IS_GEN5(dev_priv))
  13436. dev_priv->display.get_display_clock_speed =
  13437. ilk_get_display_clock_speed;
  13438. else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
  13439. IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
  13440. dev_priv->display.get_display_clock_speed =
  13441. i945_get_display_clock_speed;
  13442. else if (IS_GM45(dev_priv))
  13443. dev_priv->display.get_display_clock_speed =
  13444. gm45_get_display_clock_speed;
  13445. else if (IS_CRESTLINE(dev_priv))
  13446. dev_priv->display.get_display_clock_speed =
  13447. i965gm_get_display_clock_speed;
  13448. else if (IS_PINEVIEW(dev_priv))
  13449. dev_priv->display.get_display_clock_speed =
  13450. pnv_get_display_clock_speed;
  13451. else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
  13452. dev_priv->display.get_display_clock_speed =
  13453. g33_get_display_clock_speed;
  13454. else if (IS_I915G(dev_priv))
  13455. dev_priv->display.get_display_clock_speed =
  13456. i915_get_display_clock_speed;
  13457. else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
  13458. dev_priv->display.get_display_clock_speed =
  13459. i9xx_misc_get_display_clock_speed;
  13460. else if (IS_I915GM(dev_priv))
  13461. dev_priv->display.get_display_clock_speed =
  13462. i915gm_get_display_clock_speed;
  13463. else if (IS_I865G(dev_priv))
  13464. dev_priv->display.get_display_clock_speed =
  13465. i865_get_display_clock_speed;
  13466. else if (IS_I85X(dev_priv))
  13467. dev_priv->display.get_display_clock_speed =
  13468. i85x_get_display_clock_speed;
  13469. else { /* 830 */
  13470. WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
  13471. dev_priv->display.get_display_clock_speed =
  13472. i830_get_display_clock_speed;
  13473. }
  13474. if (IS_GEN5(dev_priv)) {
  13475. dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
  13476. } else if (IS_GEN6(dev_priv)) {
  13477. dev_priv->display.fdi_link_train = gen6_fdi_link_train;
  13478. } else if (IS_IVYBRIDGE(dev_priv)) {
  13479. /* FIXME: detect B0+ stepping and use auto training */
  13480. dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
  13481. } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  13482. dev_priv->display.fdi_link_train = hsw_fdi_link_train;
  13483. }
  13484. if (IS_BROADWELL(dev_priv)) {
  13485. dev_priv->display.modeset_commit_cdclk =
  13486. broadwell_modeset_commit_cdclk;
  13487. dev_priv->display.modeset_calc_cdclk =
  13488. broadwell_modeset_calc_cdclk;
  13489. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  13490. dev_priv->display.modeset_commit_cdclk =
  13491. valleyview_modeset_commit_cdclk;
  13492. dev_priv->display.modeset_calc_cdclk =
  13493. valleyview_modeset_calc_cdclk;
  13494. } else if (IS_BROXTON(dev_priv)) {
  13495. dev_priv->display.modeset_commit_cdclk =
  13496. bxt_modeset_commit_cdclk;
  13497. dev_priv->display.modeset_calc_cdclk =
  13498. bxt_modeset_calc_cdclk;
  13499. } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
  13500. dev_priv->display.modeset_commit_cdclk =
  13501. skl_modeset_commit_cdclk;
  13502. dev_priv->display.modeset_calc_cdclk =
  13503. skl_modeset_calc_cdclk;
  13504. }
  13505. if (dev_priv->info.gen >= 9)
  13506. dev_priv->display.update_crtcs = skl_update_crtcs;
  13507. else
  13508. dev_priv->display.update_crtcs = intel_update_crtcs;
  13509. switch (INTEL_INFO(dev_priv)->gen) {
  13510. case 2:
  13511. dev_priv->display.queue_flip = intel_gen2_queue_flip;
  13512. break;
  13513. case 3:
  13514. dev_priv->display.queue_flip = intel_gen3_queue_flip;
  13515. break;
  13516. case 4:
  13517. case 5:
  13518. dev_priv->display.queue_flip = intel_gen4_queue_flip;
  13519. break;
  13520. case 6:
  13521. dev_priv->display.queue_flip = intel_gen6_queue_flip;
  13522. break;
  13523. case 7:
  13524. case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
  13525. dev_priv->display.queue_flip = intel_gen7_queue_flip;
  13526. break;
  13527. case 9:
  13528. /* Drop through - unsupported since execlist only. */
  13529. default:
  13530. /* Default just returns -ENODEV to indicate unsupported */
  13531. dev_priv->display.queue_flip = intel_default_queue_flip;
  13532. }
  13533. }
  13534. /*
  13535. * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
  13536. * resume, or other times. This quirk makes sure that's the case for
  13537. * affected systems.
  13538. */
  13539. static void quirk_pipea_force(struct drm_device *dev)
  13540. {
  13541. struct drm_i915_private *dev_priv = to_i915(dev);
  13542. dev_priv->quirks |= QUIRK_PIPEA_FORCE;
  13543. DRM_INFO("applying pipe a force quirk\n");
  13544. }
  13545. static void quirk_pipeb_force(struct drm_device *dev)
  13546. {
  13547. struct drm_i915_private *dev_priv = to_i915(dev);
  13548. dev_priv->quirks |= QUIRK_PIPEB_FORCE;
  13549. DRM_INFO("applying pipe b force quirk\n");
  13550. }
  13551. /*
  13552. * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
  13553. */
  13554. static void quirk_ssc_force_disable(struct drm_device *dev)
  13555. {
  13556. struct drm_i915_private *dev_priv = to_i915(dev);
  13557. dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
  13558. DRM_INFO("applying lvds SSC disable quirk\n");
  13559. }
  13560. /*
  13561. * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
  13562. * brightness value
  13563. */
  13564. static void quirk_invert_brightness(struct drm_device *dev)
  13565. {
  13566. struct drm_i915_private *dev_priv = to_i915(dev);
  13567. dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
  13568. DRM_INFO("applying inverted panel brightness quirk\n");
  13569. }
  13570. /* Some VBT's incorrectly indicate no backlight is present */
  13571. static void quirk_backlight_present(struct drm_device *dev)
  13572. {
  13573. struct drm_i915_private *dev_priv = to_i915(dev);
  13574. dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
  13575. DRM_INFO("applying backlight present quirk\n");
  13576. }
  13577. struct intel_quirk {
  13578. int device;
  13579. int subsystem_vendor;
  13580. int subsystem_device;
  13581. void (*hook)(struct drm_device *dev);
  13582. };
  13583. /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
  13584. struct intel_dmi_quirk {
  13585. void (*hook)(struct drm_device *dev);
  13586. const struct dmi_system_id (*dmi_id_list)[];
  13587. };
  13588. static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  13589. {
  13590. DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
  13591. return 1;
  13592. }
  13593. static const struct intel_dmi_quirk intel_dmi_quirks[] = {
  13594. {
  13595. .dmi_id_list = &(const struct dmi_system_id[]) {
  13596. {
  13597. .callback = intel_dmi_reverse_brightness,
  13598. .ident = "NCR Corporation",
  13599. .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
  13600. DMI_MATCH(DMI_PRODUCT_NAME, ""),
  13601. },
  13602. },
  13603. { } /* terminating entry */
  13604. },
  13605. .hook = quirk_invert_brightness,
  13606. },
  13607. };
  13608. static struct intel_quirk intel_quirks[] = {
  13609. /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
  13610. { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
  13611. /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
  13612. { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
  13613. /* 830 needs to leave pipe A & dpll A up */
  13614. { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
  13615. /* 830 needs to leave pipe B & dpll B up */
  13616. { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
  13617. /* Lenovo U160 cannot use SSC on LVDS */
  13618. { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
  13619. /* Sony Vaio Y cannot use SSC on LVDS */
  13620. { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
  13621. /* Acer Aspire 5734Z must invert backlight brightness */
  13622. { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
  13623. /* Acer/eMachines G725 */
  13624. { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
  13625. /* Acer/eMachines e725 */
  13626. { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
  13627. /* Acer/Packard Bell NCL20 */
  13628. { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
  13629. /* Acer Aspire 4736Z */
  13630. { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
  13631. /* Acer Aspire 5336 */
  13632. { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
  13633. /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
  13634. { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
  13635. /* Acer C720 Chromebook (Core i3 4005U) */
  13636. { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
  13637. /* Apple Macbook 2,1 (Core 2 T7400) */
  13638. { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
  13639. /* Apple Macbook 4,1 */
  13640. { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
  13641. /* Toshiba CB35 Chromebook (Celeron 2955U) */
  13642. { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
  13643. /* HP Chromebook 14 (Celeron 2955U) */
  13644. { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
  13645. /* Dell Chromebook 11 */
  13646. { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
  13647. /* Dell Chromebook 11 (2015 version) */
  13648. { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
  13649. };
  13650. static void intel_init_quirks(struct drm_device *dev)
  13651. {
  13652. struct pci_dev *d = dev->pdev;
  13653. int i;
  13654. for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
  13655. struct intel_quirk *q = &intel_quirks[i];
  13656. if (d->device == q->device &&
  13657. (d->subsystem_vendor == q->subsystem_vendor ||
  13658. q->subsystem_vendor == PCI_ANY_ID) &&
  13659. (d->subsystem_device == q->subsystem_device ||
  13660. q->subsystem_device == PCI_ANY_ID))
  13661. q->hook(dev);
  13662. }
  13663. for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
  13664. if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
  13665. intel_dmi_quirks[i].hook(dev);
  13666. }
  13667. }

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
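
/*
 * (Re)initialize display hardware state the driver owns directly: refresh
 * the cdclk bookkeeping from the hardware and program clock gating.
 */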
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev_priv);
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WMs should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
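
/*
 * One-time modeset initialization at driver load: set up the DRM mode config
 * limits, apply quirks, initialize the watermark/PM code, create a CRTC per
 * pipe, register the shared DPLLs and outputs, read out the hardware state
 * left by the BIOS, and take over any BIOS framebuffer so boot stays
 * glitch-free.
 */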
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		int ret;

		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev_priv);
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);

	return 0;
}

static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on pipe A; we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
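
/*
 * On gen2/3 the display planes can be assigned to either pipe. Check whether
 * the BIOS left the other plane enabled and pointed at this CRTC's pipe,
 * which would mean our assumed plane -> pipe mapping is wrong.
 */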
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val;

	if (INTEL_INFO(dev_priv)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum transcoder pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
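
/*
 * Bring a single CRTC's software and hardware state back into agreement
 * after the BIOS handoff: clear debug frame start delays, reset vblank
 * bookkeeping, turn off non-primary planes, fix up a wrong gen2/3
 * plane -> pipe mapping, apply the pipe A force quirk, disable CRTCs that
 * have no encoders, and mark FIFO underrun reporting as initially disabled.
 */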
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this; the update_dpms
		 * call below will restore the pipe to the right state, but
		 * leave the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
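
/*
 * Sanitize a single encoder: if its connector reports being active but no
 * active pipe is feeding the encoder (typically fallout from the resume
 * register restore), disable the encoder by hand and clamp the connector to
 * DPMS off so software and hardware state agree again.
 */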
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
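
/*
 * If something (the BIOS, firmware, ...) re-enabled the VGA plane behind our
 * back, complain and turn it back off. The caller must guarantee that the
 * VGA power domain is powered.
 */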
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev_priv);
	}
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called either from intel_modeset_setup_hw_state
	 * or at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}

static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	plane_state->base.visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->base.visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
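
/*
 * Read the current hardware state (pipes, planes, shared DPLLs, encoders and
 * connectors) into the corresponding software state structures, so the
 * driver's bookkeeping matches whatever the BIOS left running.
 */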
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
				pixclk = ilk_pipe_pixel_rate(crtc_state);
			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc->active));
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

			encoder->base.crtc = &crtc->base;
			crtc->config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}

/*
 * Read out the current hw modeset state and sanitize it, so that the
 * software state tracking matches what the hardware is actually doing.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev_priv))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev_priv))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
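
/*
 * Restore the display configuration on resume: take all modeset locks and
 * replay the atomic state that was stashed in dev_priv->modeset_restore_state
 * before suspend.
 */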
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
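
/*
 * Second-stage modeset init, run once GEM is up: enable GT power management,
 * program the hardware defaults, set up the overlay, and pin/fence any boot
 * framebuffers taken over from the BIOS (dropping them if pinning fails).
 */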
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		struct i915_vma *vma;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		vma = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (IS_ERR(vma)) {
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}
}
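
/* Late connector registration hook: register the associated backlight device, if any. */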
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		goto err;

	return 0;

err:
	return ret;
}

void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}
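
/*
 * Tear down the modeset state on driver unload. Ordering matters: interrupts
 * and output polling go first so nothing re-arms work against hardware that
 * is about to disappear, then the remaining helpers, overlay and GT
 * powersave state are cleaned up.
 */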
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Shut down interrupts and polling first to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev);
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
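
/*
 * Snapshot the per-pipe cursor, plane, pipe and transcoder registers for the
 * GPU error state, skipping anything whose power domain is currently off so
 * the capture does not touch powered-down registers.
 */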
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_i915_private *dev_priv,
				struct intel_display_error_state *error)
{
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif