intel_display.c 485 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_i915_private *dev_priv,
			     struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
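
/*
 * Worked example for the CCK divider above (illustrative numbers only):
 * the returned rate is ref_freq * 2 / (divider + 1), so with a 1600000 kHz
 * HPLL reference and divider = 7 this yields
 * DIV_ROUND_CLOSEST(3200000, 8) = 400000 kHz.
 */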
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}

void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
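
/*
 * Worked example for the i9xx formula above (illustrative numbers only):
 * refclk = 96000 kHz, n = 3, m1 = 12, m2 = 7, p1 = 2, p2 = 10 gives
 * m = 5 * (12 + 2) + (7 + 2) = 79, vco = 96000 * 79 / (3 + 2) = 1516800 kHz
 * and dot = 1516800 / (2 * 10) = 75840 kHz.
 */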
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
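
/*
 * Note: the VLV/CHV limits and searches operate on the 5x "fast" clock
 * (see intel_limits_vlv.dot and the "target *= 5" in the find_best_dpll
 * helpers below), so the final division by 5 converts back to the pipe's
 * dot clock rate.
 */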
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
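
/*
 * Note: on CHV m2 carries 22 fractional bits (see the "<< 22" values in
 * intel_limits_chv), which is why the vco computation above divides by
 * n << 22 to cancel the fixed-point scaling.
 */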
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
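
/*
 * Note: the search above is a brute-force sweep over m1, m2, n and p1 that
 * keeps the candidate with the smallest |dot - target| error; the early
 * "m2 >= m1" break mirrors the m1 > m2 constraint that intel_PLL_is_valid()
 * enforces on these platforms.
 */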
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
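
/*
 * Worked example for the ppm error above (illustrative numbers only):
 * a target of 148500 kHz and a calculated dot clock of 148350 kHz gives
 * 1000000 * 150 / 148500 ~= 1010 ppm, i.e. roughly 0.1% off target.
 */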
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2.  If we ever need to support a 200 MHz refclk, this needs
	 * to be revisited because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}
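
/*
 * Note: BXT reuses the CHV search with a fixed 100000 kHz (100 MHz)
 * reference clock and the intel_limits_bxt table; no LVDS downclocking
 * match_clock is passed.
 */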
bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev_priv))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
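
/*
 * Note: pipe_dsl_stopped() samples the PIPEDSL scanline counter twice,
 * 5 ms apart; an unchanged value means the pipe is no longer scanning out.
 */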
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL(0);
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
  1075. "panel assertion failure, pipe %c regs locked\n",
  1076. pipe_name(pipe));
  1077. }
  1078. static void assert_cursor(struct drm_i915_private *dev_priv,
  1079. enum pipe pipe, bool state)
  1080. {
  1081. bool cur_state;
  1082. if (IS_845G(dev_priv) || IS_I865G(dev_priv))
  1083. cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
  1084. else
  1085. cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  1086. I915_STATE_WARN(cur_state != state,
  1087. "cursor on pipe %c assertion failure (expected %s, current %s)\n",
  1088. pipe_name(pipe), onoff(state), onoff(cur_state));
  1089. }
  1090. #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
  1091. #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
  1092. void assert_pipe(struct drm_i915_private *dev_priv,
  1093. enum pipe pipe, bool state)
  1094. {
  1095. bool cur_state;
  1096. enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1097. pipe);
  1098. enum intel_display_power_domain power_domain;
  1099. /* if we need the pipe quirk it must be always on */
  1100. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1101. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1102. state = true;
  1103. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  1104. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  1105. u32 val = I915_READ(PIPECONF(cpu_transcoder));
  1106. cur_state = !!(val & PIPECONF_ENABLE);
  1107. intel_display_power_put(dev_priv, power_domain);
  1108. } else {
  1109. cur_state = false;
  1110. }
  1111. I915_STATE_WARN(cur_state != state,
  1112. "pipe %c assertion failure (expected %s, current %s)\n",
  1113. pipe_name(pipe), onoff(state), onoff(cur_state));
  1114. }
  1115. static void assert_plane(struct drm_i915_private *dev_priv,
  1116. enum plane plane, bool state)
  1117. {
  1118. u32 val;
  1119. bool cur_state;
  1120. val = I915_READ(DSPCNTR(plane));
  1121. cur_state = !!(val & DISPLAY_PLANE_ENABLE);
  1122. I915_STATE_WARN(cur_state != state,
  1123. "plane %c assertion failure (expected %s, current %s)\n",
  1124. plane_name(plane), onoff(state), onoff(cur_state));
  1125. }
  1126. #define assert_plane_enabled(d, p) assert_plane(d, p, true)
  1127. #define assert_plane_disabled(d, p) assert_plane(d, p, false)
  1128. static void assert_planes_disabled(struct drm_i915_private *dev_priv,
  1129. enum pipe pipe)
  1130. {
  1131. int i;
  1132. /* Primary planes are fixed to pipes on gen4+ */
  1133. if (INTEL_GEN(dev_priv) >= 4) {
  1134. u32 val = I915_READ(DSPCNTR(pipe));
  1135. I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
  1136. "plane %c assertion failure, should be disabled but not\n",
  1137. plane_name(pipe));
  1138. return;
  1139. }
  1140. /* Need to check both planes against the pipe */
  1141. for_each_pipe(dev_priv, i) {
  1142. u32 val = I915_READ(DSPCNTR(i));
  1143. enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
  1144. DISPPLANE_SEL_PIPE_SHIFT;
  1145. I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
  1146. "plane %c assertion failure, should be off on pipe %c but is still active\n",
  1147. plane_name(i), pipe_name(pipe));
  1148. }
  1149. }
  1150. static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
  1151. enum pipe pipe)
  1152. {
  1153. int sprite;
  1154. if (INTEL_GEN(dev_priv) >= 9) {
  1155. for_each_sprite(dev_priv, pipe, sprite) {
  1156. u32 val = I915_READ(PLANE_CTL(pipe, sprite));
  1157. I915_STATE_WARN(val & PLANE_CTL_ENABLE,
  1158. "plane %d assertion failure, should be off on pipe %c but is still active\n",
  1159. sprite, pipe_name(pipe));
  1160. }
  1161. } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  1162. for_each_sprite(dev_priv, pipe, sprite) {
  1163. u32 val = I915_READ(SPCNTR(pipe, sprite));
  1164. I915_STATE_WARN(val & SP_ENABLE,
  1165. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1166. sprite_name(pipe, sprite), pipe_name(pipe));
  1167. }
  1168. } else if (INTEL_GEN(dev_priv) >= 7) {
  1169. u32 val = I915_READ(SPRCTL(pipe));
  1170. I915_STATE_WARN(val & SPRITE_ENABLE,
  1171. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1172. plane_name(pipe), pipe_name(pipe));
  1173. } else if (INTEL_GEN(dev_priv) >= 5) {
  1174. u32 val = I915_READ(DVSCNTR(pipe));
  1175. I915_STATE_WARN(val & DVS_ENABLE,
  1176. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1177. plane_name(pipe), pipe_name(pipe));
  1178. }
  1179. }
  1180. static void assert_vblank_disabled(struct drm_crtc *crtc)
  1181. {
  1182. if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
  1183. drm_crtc_vblank_put(crtc);
  1184. }
  1185. void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
  1186. enum pipe pipe)
  1187. {
  1188. u32 val;
  1189. bool enabled;
  1190. val = I915_READ(PCH_TRANSCONF(pipe));
  1191. enabled = !!(val & TRANS_ENABLE);
  1192. I915_STATE_WARN(enabled,
  1193. "transcoder assertion failed, should be off on pipe %c but is still active\n",
  1194. pipe_name(pipe));
  1195. }
  1196. static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
  1197. enum pipe pipe, u32 port_sel, u32 val)
  1198. {
  1199. if ((val & DP_PORT_EN) == 0)
  1200. return false;
  1201. if (HAS_PCH_CPT(dev_priv)) {
  1202. u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
  1203. if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
  1204. return false;
  1205. } else if (IS_CHERRYVIEW(dev_priv)) {
  1206. if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
  1207. return false;
  1208. } else {
  1209. if ((val & DP_PIPE_MASK) != (pipe << 30))
  1210. return false;
  1211. }
  1212. return true;
  1213. }
  1214. static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
  1215. enum pipe pipe, u32 val)
  1216. {
  1217. if ((val & SDVO_ENABLE) == 0)
  1218. return false;
  1219. if (HAS_PCH_CPT(dev_priv)) {
  1220. if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
  1221. return false;
  1222. } else if (IS_CHERRYVIEW(dev_priv)) {
  1223. if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
  1224. return false;
  1225. } else {
  1226. if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
  1227. return false;
  1228. }
  1229. return true;
  1230. }
  1231. static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
  1232. enum pipe pipe, u32 val)
  1233. {
  1234. if ((val & LVDS_PORT_EN) == 0)
  1235. return false;
  1236. if (HAS_PCH_CPT(dev_priv)) {
  1237. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1238. return false;
  1239. } else {
  1240. if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
  1241. return false;
  1242. }
  1243. return true;
  1244. }
  1245. static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
  1246. enum pipe pipe, u32 val)
  1247. {
  1248. if ((val & ADPA_DAC_ENABLE) == 0)
  1249. return false;
  1250. if (HAS_PCH_CPT(dev_priv)) {
  1251. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1252. return false;
  1253. } else {
  1254. if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
  1255. return false;
  1256. }
  1257. return true;
  1258. }
  1259. static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
  1260. enum pipe pipe, i915_reg_t reg,
  1261. u32 port_sel)
  1262. {
  1263. u32 val = I915_READ(reg);
  1264. I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
  1265. "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
  1266. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1267. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
  1268. && (val & DP_PIPEB_SELECT),
  1269. "IBX PCH dp port still using transcoder B\n");
  1270. }
  1271. static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
  1272. enum pipe pipe, i915_reg_t reg)
  1273. {
  1274. u32 val = I915_READ(reg);
  1275. I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
  1276. "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
  1277. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1278. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
  1279. && (val & SDVO_PIPE_B_SELECT),
  1280. "IBX PCH hdmi port still using transcoder B\n");
  1281. }
  1282. static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  1283. enum pipe pipe)
  1284. {
  1285. u32 val;
  1286. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1287. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1288. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1289. val = I915_READ(PCH_ADPA);
  1290. I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
  1291. "PCH VGA enabled on transcoder %c, should be disabled\n",
  1292. pipe_name(pipe));
  1293. val = I915_READ(PCH_LVDS);
  1294. I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
  1295. "PCH LVDS enabled on transcoder %c, should be disabled\n",
  1296. pipe_name(pipe));
  1297. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
  1298. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
  1299. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
  1300. }
  1301. static void _vlv_enable_pll(struct intel_crtc *crtc,
  1302. const struct intel_crtc_state *pipe_config)
  1303. {
  1304. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1305. enum pipe pipe = crtc->pipe;
  1306. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1307. POSTING_READ(DPLL(pipe));
  1308. udelay(150);
  1309. if (intel_wait_for_register(dev_priv,
  1310. DPLL(pipe),
  1311. DPLL_LOCK_VLV,
  1312. DPLL_LOCK_VLV,
  1313. 1))
  1314. DRM_ERROR("DPLL %d failed to lock\n", pipe);
  1315. }
  1316. static void vlv_enable_pll(struct intel_crtc *crtc,
  1317. const struct intel_crtc_state *pipe_config)
  1318. {
  1319. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1320. enum pipe pipe = crtc->pipe;
  1321. assert_pipe_disabled(dev_priv, pipe);
  1322. /* PLL is protected by panel, make sure we can write it */
  1323. assert_panel_unlocked(dev_priv, pipe);
  1324. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1325. _vlv_enable_pll(crtc, pipe_config);
  1326. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1327. POSTING_READ(DPLL_MD(pipe));
  1328. }
  1329. static void _chv_enable_pll(struct intel_crtc *crtc,
  1330. const struct intel_crtc_state *pipe_config)
  1331. {
  1332. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1333. enum pipe pipe = crtc->pipe;
  1334. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1335. u32 tmp;
  1336. mutex_lock(&dev_priv->sb_lock);
  1337. /* Enable back the 10bit clock to display controller */
  1338. tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1339. tmp |= DPIO_DCLKP_EN;
  1340. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
  1341. mutex_unlock(&dev_priv->sb_lock);
  1342. /*
  1343. * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
  1344. */
  1345. udelay(1);
  1346. /* Enable PLL */
  1347. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1348. /* Check PLL is locked */
  1349. if (intel_wait_for_register(dev_priv,
  1350. DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
  1351. 1))
  1352. DRM_ERROR("PLL %d failed to lock\n", pipe);
  1353. }
  1354. static void chv_enable_pll(struct intel_crtc *crtc,
  1355. const struct intel_crtc_state *pipe_config)
  1356. {
  1357. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1358. enum pipe pipe = crtc->pipe;
  1359. assert_pipe_disabled(dev_priv, pipe);
  1360. /* PLL is protected by panel, make sure we can write it */
  1361. assert_panel_unlocked(dev_priv, pipe);
  1362. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1363. _chv_enable_pll(crtc, pipe_config);
  1364. if (pipe != PIPE_A) {
  1365. /*
  1366. * WaPixelRepeatModeFixForC0:chv
  1367. *
  1368. * DPLLCMD is AWOL. Use chicken bits to propagate
  1369. * the value from DPLLBMD to either pipe B or C.
  1370. */
  1371. I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
  1372. I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
  1373. I915_WRITE(CBR4_VLV, 0);
  1374. dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
  1375. /*
  1376. * DPLLB VGA mode also seems to cause problems.
  1377. * We should always have it disabled.
  1378. */
  1379. WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
  1380. } else {
  1381. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1382. POSTING_READ(DPLL_MD(pipe));
  1383. }
  1384. }
  1385. static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
  1386. {
  1387. struct intel_crtc *crtc;
  1388. int count = 0;
  1389. for_each_intel_crtc(&dev_priv->drm, crtc) {
  1390. count += crtc->base.state->active &&
  1391. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
  1392. }
  1393. return count;
  1394. }
  1395. static void i9xx_enable_pll(struct intel_crtc *crtc)
  1396. {
  1397. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1398. i915_reg_t reg = DPLL(crtc->pipe);
  1399. u32 dpll = crtc->config->dpll_hw_state.dpll;
  1400. assert_pipe_disabled(dev_priv, crtc->pipe);
  1401. /* PLL is protected by panel, make sure we can write it */
  1402. if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
  1403. assert_panel_unlocked(dev_priv, crtc->pipe);
  1404. /* Enable DVO 2x clock on both PLLs if necessary */
  1405. if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
  1406. /*
  1407. * It appears to be important that we don't enable this
  1408. * for the current pipe before otherwise configuring the
  1409. * PLL. No idea how this should be handled if multiple
  1410. * DVO outputs are enabled simultaneosly.
  1411. */
  1412. dpll |= DPLL_DVO_2X_MODE;
  1413. I915_WRITE(DPLL(!crtc->pipe),
  1414. I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
  1415. }
  1416. /*
  1417. * Apparently we need to have VGA mode enabled prior to changing
  1418. * the P1/P2 dividers. Otherwise the DPLL will keep using the old
  1419. * dividers, even though the register value does change.
  1420. */
  1421. I915_WRITE(reg, 0);
  1422. I915_WRITE(reg, dpll);
  1423. /* Wait for the clocks to stabilize. */
  1424. POSTING_READ(reg);
  1425. udelay(150);
  1426. if (INTEL_GEN(dev_priv) >= 4) {
  1427. I915_WRITE(DPLL_MD(crtc->pipe),
  1428. crtc->config->dpll_hw_state.dpll_md);
  1429. } else {
  1430. /* The pixel multiplier can only be updated once the
  1431. * DPLL is enabled and the clocks are stable.
  1432. *
  1433. * So write it again.
  1434. */
  1435. I915_WRITE(reg, dpll);
  1436. }
  1437. /* We do this three times for luck */
  1438. I915_WRITE(reg, dpll);
  1439. POSTING_READ(reg);
  1440. udelay(150); /* wait for warmup */
  1441. I915_WRITE(reg, dpll);
  1442. POSTING_READ(reg);
  1443. udelay(150); /* wait for warmup */
  1444. I915_WRITE(reg, dpll);
  1445. POSTING_READ(reg);
  1446. udelay(150); /* wait for warmup */
  1447. }
  1448. /**
  1449. * i9xx_disable_pll - disable a PLL
  1450. * @dev_priv: i915 private structure
  1451. * @pipe: pipe PLL to disable
  1452. *
  1453. * Disable the PLL for @pipe, making sure the pipe is off first.
  1454. *
  1455. * Note! This is for pre-ILK only.
  1456. */
  1457. static void i9xx_disable_pll(struct intel_crtc *crtc)
  1458. {
  1459. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1460. enum pipe pipe = crtc->pipe;
  1461. /* Disable DVO 2x clock on both PLLs if necessary */
  1462. if (IS_I830(dev_priv) &&
  1463. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
  1464. !intel_num_dvo_pipes(dev_priv)) {
  1465. I915_WRITE(DPLL(PIPE_B),
  1466. I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
  1467. I915_WRITE(DPLL(PIPE_A),
  1468. I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
  1469. }
  1470. /* Don't disable pipe or pipe PLLs if needed */
  1471. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1472. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1473. return;
  1474. /* Make sure the pipe isn't still relying on us */
  1475. assert_pipe_disabled(dev_priv, pipe);
  1476. I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
  1477. POSTING_READ(DPLL(pipe));
  1478. }
  1479. static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1480. {
  1481. u32 val;
  1482. /* Make sure the pipe isn't still relying on us */
  1483. assert_pipe_disabled(dev_priv, pipe);
  1484. val = DPLL_INTEGRATED_REF_CLK_VLV |
  1485. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1486. if (pipe != PIPE_A)
  1487. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1488. I915_WRITE(DPLL(pipe), val);
  1489. POSTING_READ(DPLL(pipe));
  1490. }
  1491. static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1492. {
  1493. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1494. u32 val;
  1495. /* Make sure the pipe isn't still relying on us */
  1496. assert_pipe_disabled(dev_priv, pipe);
  1497. val = DPLL_SSC_REF_CLK_CHV |
  1498. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1499. if (pipe != PIPE_A)
  1500. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1501. I915_WRITE(DPLL(pipe), val);
  1502. POSTING_READ(DPLL(pipe));
  1503. mutex_lock(&dev_priv->sb_lock);
  1504. /* Disable 10bit clock to display controller */
  1505. val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1506. val &= ~DPIO_DCLKP_EN;
  1507. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
  1508. mutex_unlock(&dev_priv->sb_lock);
  1509. }
  1510. void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  1511. struct intel_digital_port *dport,
  1512. unsigned int expected_mask)
  1513. {
  1514. u32 port_mask;
  1515. i915_reg_t dpll_reg;
  1516. switch (dport->port) {
  1517. case PORT_B:
  1518. port_mask = DPLL_PORTB_READY_MASK;
  1519. dpll_reg = DPLL(0);
  1520. break;
  1521. case PORT_C:
  1522. port_mask = DPLL_PORTC_READY_MASK;
  1523. dpll_reg = DPLL(0);
  1524. expected_mask <<= 4;
  1525. break;
  1526. case PORT_D:
  1527. port_mask = DPLL_PORTD_READY_MASK;
  1528. dpll_reg = DPIO_PHY_STATUS;
  1529. break;
  1530. default:
  1531. BUG();
  1532. }
  1533. if (intel_wait_for_register(dev_priv,
  1534. dpll_reg, port_mask, expected_mask,
  1535. 1000))
  1536. WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
  1537. port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
  1538. }
  1539. static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1540. enum pipe pipe)
  1541. {
  1542. struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
  1543. pipe);
  1544. i915_reg_t reg;
  1545. uint32_t val, pipeconf_val;
  1546. /* Make sure PCH DPLL is enabled */
  1547. assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
  1548. /* FDI must be feeding us bits for PCH ports */
  1549. assert_fdi_tx_enabled(dev_priv, pipe);
  1550. assert_fdi_rx_enabled(dev_priv, pipe);
  1551. if (HAS_PCH_CPT(dev_priv)) {
  1552. /* Workaround: Set the timing override bit before enabling the
  1553. * pch transcoder. */
  1554. reg = TRANS_CHICKEN2(pipe);
  1555. val = I915_READ(reg);
  1556. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1557. I915_WRITE(reg, val);
  1558. }
  1559. reg = PCH_TRANSCONF(pipe);
  1560. val = I915_READ(reg);
  1561. pipeconf_val = I915_READ(PIPECONF(pipe));
  1562. if (HAS_PCH_IBX(dev_priv)) {
  1563. /*
  1564. * Make the BPC in transcoder be consistent with
  1565. * that in pipeconf reg. For HDMI we must use 8bpc
  1566. * here for both 8bpc and 12bpc.
  1567. */
  1568. val &= ~PIPECONF_BPC_MASK;
  1569. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
  1570. val |= PIPECONF_8BPC;
  1571. else
  1572. val |= pipeconf_val & PIPECONF_BPC_MASK;
  1573. }
  1574. val &= ~TRANS_INTERLACE_MASK;
  1575. if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
  1576. if (HAS_PCH_IBX(dev_priv) &&
  1577. intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  1578. val |= TRANS_LEGACY_INTERLACED_ILK;
  1579. else
  1580. val |= TRANS_INTERLACED;
  1581. else
  1582. val |= TRANS_PROGRESSIVE;
  1583. I915_WRITE(reg, val | TRANS_ENABLE);
  1584. if (intel_wait_for_register(dev_priv,
  1585. reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
  1586. 100))
  1587. DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
  1588. }
  1589. static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1590. enum transcoder cpu_transcoder)
  1591. {
  1592. u32 val, pipeconf_val;
  1593. /* FDI must be feeding us bits for PCH ports */
  1594. assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
  1595. assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
  1596. /* Workaround: set timing override bit. */
  1597. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1598. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1599. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1600. val = TRANS_ENABLE;
  1601. pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
  1602. if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
  1603. PIPECONF_INTERLACED_ILK)
  1604. val |= TRANS_INTERLACED;
  1605. else
  1606. val |= TRANS_PROGRESSIVE;
  1607. I915_WRITE(LPT_TRANSCONF, val);
  1608. if (intel_wait_for_register(dev_priv,
  1609. LPT_TRANSCONF,
  1610. TRANS_STATE_ENABLE,
  1611. TRANS_STATE_ENABLE,
  1612. 100))
  1613. DRM_ERROR("Failed to enable PCH transcoder\n");
  1614. }
  1615. static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
  1616. enum pipe pipe)
  1617. {
  1618. i915_reg_t reg;
  1619. uint32_t val;
  1620. /* FDI relies on the transcoder */
  1621. assert_fdi_tx_disabled(dev_priv, pipe);
  1622. assert_fdi_rx_disabled(dev_priv, pipe);
  1623. /* Ports must be off as well */
  1624. assert_pch_ports_disabled(dev_priv, pipe);
  1625. reg = PCH_TRANSCONF(pipe);
  1626. val = I915_READ(reg);
  1627. val &= ~TRANS_ENABLE;
  1628. I915_WRITE(reg, val);
  1629. /* wait for PCH transcoder off, transcoder state */
  1630. if (intel_wait_for_register(dev_priv,
  1631. reg, TRANS_STATE_ENABLE, 0,
  1632. 50))
  1633. DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
  1634. if (HAS_PCH_CPT(dev_priv)) {
  1635. /* Workaround: Clear the timing override chicken bit again. */
  1636. reg = TRANS_CHICKEN2(pipe);
  1637. val = I915_READ(reg);
  1638. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1639. I915_WRITE(reg, val);
  1640. }
  1641. }
  1642. void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  1643. {
  1644. u32 val;
  1645. val = I915_READ(LPT_TRANSCONF);
  1646. val &= ~TRANS_ENABLE;
  1647. I915_WRITE(LPT_TRANSCONF, val);
  1648. /* wait for PCH transcoder off, transcoder state */
  1649. if (intel_wait_for_register(dev_priv,
  1650. LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
  1651. 50))
  1652. DRM_ERROR("Failed to disable PCH transcoder\n");
  1653. /* Workaround: clear timing override bit. */
  1654. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1655. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1656. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1657. }
  1658. enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
  1659. {
  1660. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1661. WARN_ON(!crtc->config->has_pch_encoder);
  1662. if (HAS_PCH_LPT(dev_priv))
  1663. return TRANSCODER_A;
  1664. else
  1665. return (enum transcoder) crtc->pipe;
  1666. }
  1667. /**
  1668. * intel_enable_pipe - enable a pipe, asserting requirements
  1669. * @crtc: crtc responsible for the pipe
  1670. *
  1671. * Enable @crtc's pipe, making sure that various hardware specific requirements
  1672. * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
  1673. */
  1674. static void intel_enable_pipe(struct intel_crtc *crtc)
  1675. {
  1676. struct drm_device *dev = crtc->base.dev;
  1677. struct drm_i915_private *dev_priv = to_i915(dev);
  1678. enum pipe pipe = crtc->pipe;
  1679. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1680. i915_reg_t reg;
  1681. u32 val;
  1682. DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
  1683. assert_planes_disabled(dev_priv, pipe);
  1684. assert_cursor_disabled(dev_priv, pipe);
  1685. assert_sprites_disabled(dev_priv, pipe);
  1686. /*
  1687. * A pipe without a PLL won't actually be able to drive bits from
  1688. * a plane. On ILK+ the pipe PLLs are integrated, so we don't
  1689. * need the check.
  1690. */
  1691. if (HAS_GMCH_DISPLAY(dev_priv)) {
  1692. if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
  1693. assert_dsi_pll_enabled(dev_priv);
  1694. else
  1695. assert_pll_enabled(dev_priv, pipe);
  1696. } else {
  1697. if (crtc->config->has_pch_encoder) {
  1698. /* if driving the PCH, we need FDI enabled */
  1699. assert_fdi_rx_pll_enabled(dev_priv,
  1700. (enum pipe) intel_crtc_pch_transcoder(crtc));
  1701. assert_fdi_tx_pll_enabled(dev_priv,
  1702. (enum pipe) cpu_transcoder);
  1703. }
  1704. /* FIXME: assert CPU port conditions for SNB+ */
  1705. }
  1706. reg = PIPECONF(cpu_transcoder);
  1707. val = I915_READ(reg);
  1708. if (val & PIPECONF_ENABLE) {
  1709. WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1710. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
  1711. return;
  1712. }
  1713. I915_WRITE(reg, val | PIPECONF_ENABLE);
  1714. POSTING_READ(reg);
  1715. /*
  1716. * Until the pipe starts DSL will read as 0, which would cause
  1717. * an apparent vblank timestamp jump, which messes up also the
  1718. * frame count when it's derived from the timestamps. So let's
  1719. * wait for the pipe to start properly before we call
  1720. * drm_crtc_vblank_on()
  1721. */
  1722. if (dev->max_vblank_count == 0 &&
  1723. wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
  1724. DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
  1725. }
  1726. /**
  1727. * intel_disable_pipe - disable a pipe, asserting requirements
  1728. * @crtc: crtc whose pipes is to be disabled
  1729. *
  1730. * Disable the pipe of @crtc, making sure that various hardware
  1731. * specific requirements are met, if applicable, e.g. plane
  1732. * disabled, panel fitter off, etc.
  1733. *
  1734. * Will wait until the pipe has shut down before returning.
  1735. */
  1736. static void intel_disable_pipe(struct intel_crtc *crtc)
  1737. {
  1738. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1739. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1740. enum pipe pipe = crtc->pipe;
  1741. i915_reg_t reg;
  1742. u32 val;
  1743. DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
  1744. /*
  1745. * Make sure planes won't keep trying to pump pixels to us,
  1746. * or we might hang the display.
  1747. */
  1748. assert_planes_disabled(dev_priv, pipe);
  1749. assert_cursor_disabled(dev_priv, pipe);
  1750. assert_sprites_disabled(dev_priv, pipe);
  1751. reg = PIPECONF(cpu_transcoder);
  1752. val = I915_READ(reg);
  1753. if ((val & PIPECONF_ENABLE) == 0)
  1754. return;
  1755. /*
  1756. * Double wide has implications for planes
  1757. * so best keep it disabled when not needed.
  1758. */
  1759. if (crtc->config->double_wide)
  1760. val &= ~PIPECONF_DOUBLE_WIDE;
  1761. /* Don't disable pipe or pipe PLLs if needed */
  1762. if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
  1763. !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1764. val &= ~PIPECONF_ENABLE;
  1765. I915_WRITE(reg, val);
  1766. if ((val & PIPECONF_ENABLE) == 0)
  1767. intel_wait_for_pipe_off(crtc);
  1768. }
  1769. static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
  1770. {
  1771. return IS_GEN2(dev_priv) ? 2048 : 4096;
  1772. }
  1773. static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
  1774. uint64_t fb_modifier, unsigned int cpp)
  1775. {
  1776. switch (fb_modifier) {
  1777. case DRM_FORMAT_MOD_NONE:
  1778. return cpp;
  1779. case I915_FORMAT_MOD_X_TILED:
  1780. if (IS_GEN2(dev_priv))
  1781. return 128;
  1782. else
  1783. return 512;
  1784. case I915_FORMAT_MOD_Y_TILED:
  1785. if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
  1786. return 128;
  1787. else
  1788. return 512;
  1789. case I915_FORMAT_MOD_Yf_TILED:
  1790. switch (cpp) {
  1791. case 1:
  1792. return 64;
  1793. case 2:
  1794. case 4:
  1795. return 128;
  1796. case 8:
  1797. case 16:
  1798. return 256;
  1799. default:
  1800. MISSING_CASE(cpp);
  1801. return cpp;
  1802. }
  1803. break;
  1804. default:
  1805. MISSING_CASE(fb_modifier);
  1806. return cpp;
  1807. }
  1808. }
  1809. unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
  1810. uint64_t fb_modifier, unsigned int cpp)
  1811. {
  1812. if (fb_modifier == DRM_FORMAT_MOD_NONE)
  1813. return 1;
  1814. else
  1815. return intel_tile_size(dev_priv) /
  1816. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1817. }
  1818. /* Return the tile dimensions in pixel units */
  1819. static void intel_tile_dims(const struct drm_i915_private *dev_priv,
  1820. unsigned int *tile_width,
  1821. unsigned int *tile_height,
  1822. uint64_t fb_modifier,
  1823. unsigned int cpp)
  1824. {
  1825. unsigned int tile_width_bytes =
  1826. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1827. *tile_width = tile_width_bytes / cpp;
  1828. *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
  1829. }
  1830. unsigned int
  1831. intel_fb_align_height(struct drm_device *dev, unsigned int height,
  1832. uint32_t pixel_format, uint64_t fb_modifier)
  1833. {
  1834. unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
  1835. unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
  1836. return ALIGN(height, tile_height);
  1837. }
  1838. unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
  1839. {
  1840. unsigned int size = 0;
  1841. int i;
  1842. for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
  1843. size += rot_info->plane[i].width * rot_info->plane[i].height;
  1844. return size;
  1845. }
  1846. static void
  1847. intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
  1848. const struct drm_framebuffer *fb,
  1849. unsigned int rotation)
  1850. {
  1851. if (drm_rotation_90_or_270(rotation)) {
  1852. *view = i915_ggtt_view_rotated;
  1853. view->params.rotated = to_intel_framebuffer(fb)->rot_info;
  1854. } else {
  1855. *view = i915_ggtt_view_normal;
  1856. }
  1857. }
  1858. static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
  1859. {
  1860. if (INTEL_INFO(dev_priv)->gen >= 9)
  1861. return 256 * 1024;
  1862. else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
  1863. IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  1864. return 128 * 1024;
  1865. else if (INTEL_INFO(dev_priv)->gen >= 4)
  1866. return 4 * 1024;
  1867. else
  1868. return 0;
  1869. }
  1870. static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
  1871. uint64_t fb_modifier)
  1872. {
  1873. switch (fb_modifier) {
  1874. case DRM_FORMAT_MOD_NONE:
  1875. return intel_linear_alignment(dev_priv);
  1876. case I915_FORMAT_MOD_X_TILED:
  1877. if (INTEL_INFO(dev_priv)->gen >= 9)
  1878. return 256 * 1024;
  1879. return 0;
  1880. case I915_FORMAT_MOD_Y_TILED:
  1881. case I915_FORMAT_MOD_Yf_TILED:
  1882. return 1 * 1024 * 1024;
  1883. default:
  1884. MISSING_CASE(fb_modifier);
  1885. return 0;
  1886. }
  1887. }
  1888. struct i915_vma *
  1889. intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  1890. {
  1891. struct drm_device *dev = fb->dev;
  1892. struct drm_i915_private *dev_priv = to_i915(dev);
  1893. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1894. struct i915_ggtt_view view;
  1895. struct i915_vma *vma;
  1896. u32 alignment;
  1897. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  1898. alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
  1899. intel_fill_fb_ggtt_view(&view, fb, rotation);
  1900. /* Note that the w/a also requires 64 PTE of padding following the
  1901. * bo. We currently fill all unused PTE with the shadow page and so
  1902. * we should always have valid PTE following the scanout preventing
  1903. * the VT-d warning.
  1904. */
  1905. if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
  1906. alignment = 256 * 1024;
  1907. /*
  1908. * Global gtt pte registers are special registers which actually forward
  1909. * writes to a chunk of system memory. Which means that there is no risk
  1910. * that the register values disappear as soon as we call
  1911. * intel_runtime_pm_put(), so it is correct to wrap only the
  1912. * pin/unpin/fence and not more.
  1913. */
  1914. intel_runtime_pm_get(dev_priv);
  1915. vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
  1916. if (IS_ERR(vma))
  1917. goto err;
  1918. if (i915_vma_is_map_and_fenceable(vma)) {
  1919. /* Install a fence for tiled scan-out. Pre-i965 always needs a
  1920. * fence, whereas 965+ only requires a fence if using
  1921. * framebuffer compression. For simplicity, we always, when
  1922. * possible, install a fence as the cost is not that onerous.
  1923. *
  1924. * If we fail to fence the tiled scanout, then either the
  1925. * modeset will reject the change (which is highly unlikely as
  1926. * the affected systems, all but one, do not have unmappable
  1927. * space) or we will not be able to enable full powersaving
  1928. * techniques (also likely not to apply due to various limits
  1929. * FBC and the like impose on the size of the buffer, which
  1930. * presumably we violated anyway with this unmappable buffer).
  1931. * Anyway, it is presumably better to stumble onwards with
  1932. * something and try to run the system in a "less than optimal"
  1933. * mode that matches the user configuration.
  1934. */
  1935. if (i915_vma_get_fence(vma) == 0)
  1936. i915_vma_pin_fence(vma);
  1937. }
  1938. err:
  1939. intel_runtime_pm_put(dev_priv);
  1940. return vma;
  1941. }
  1942. void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  1943. {
  1944. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1945. struct i915_ggtt_view view;
  1946. struct i915_vma *vma;
  1947. WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
  1948. intel_fill_fb_ggtt_view(&view, fb, rotation);
  1949. vma = i915_gem_object_to_ggtt(obj, &view);
  1950. i915_vma_unpin_fence(vma);
  1951. i915_gem_object_unpin_from_display_plane(vma);
  1952. }
  1953. static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
  1954. unsigned int rotation)
  1955. {
  1956. if (drm_rotation_90_or_270(rotation))
  1957. return to_intel_framebuffer(fb)->rotated[plane].pitch;
  1958. else
  1959. return fb->pitches[plane];
  1960. }
  1961. /*
  1962. * Convert the x/y offsets into a linear offset.
  1963. * Only valid with 0/180 degree rotation, which is fine since linear
  1964. * offset is only used with linear buffers on pre-hsw and tiled buffers
  1965. * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
  1966. */
  1967. u32 intel_fb_xy_to_linear(int x, int y,
  1968. const struct intel_plane_state *state,
  1969. int plane)
  1970. {
  1971. const struct drm_framebuffer *fb = state->base.fb;
  1972. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  1973. unsigned int pitch = fb->pitches[plane];
  1974. return y * pitch + x * cpp;
  1975. }
  1976. /*
  1977. * Add the x/y offsets derived from fb->offsets[] to the user
  1978. * specified plane src x/y offsets. The resulting x/y offsets
  1979. * specify the start of scanout from the beginning of the gtt mapping.
  1980. */
  1981. void intel_add_fb_offsets(int *x, int *y,
  1982. const struct intel_plane_state *state,
  1983. int plane)
  1984. {
  1985. const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
  1986. unsigned int rotation = state->base.rotation;
  1987. if (drm_rotation_90_or_270(rotation)) {
  1988. *x += intel_fb->rotated[plane].x;
  1989. *y += intel_fb->rotated[plane].y;
  1990. } else {
  1991. *x += intel_fb->normal[plane].x;
  1992. *y += intel_fb->normal[plane].y;
  1993. }
  1994. }
  1995. /*
  1996. * Input tile dimensions and pitch must already be
  1997. * rotated to match x and y, and in pixel units.
  1998. */
  1999. static u32 _intel_adjust_tile_offset(int *x, int *y,
  2000. unsigned int tile_width,
  2001. unsigned int tile_height,
  2002. unsigned int tile_size,
  2003. unsigned int pitch_tiles,
  2004. u32 old_offset,
  2005. u32 new_offset)
  2006. {
  2007. unsigned int pitch_pixels = pitch_tiles * tile_width;
  2008. unsigned int tiles;
  2009. WARN_ON(old_offset & (tile_size - 1));
  2010. WARN_ON(new_offset & (tile_size - 1));
  2011. WARN_ON(new_offset > old_offset);
  2012. tiles = (old_offset - new_offset) / tile_size;
  2013. *y += tiles / pitch_tiles * tile_height;
  2014. *x += tiles % pitch_tiles * tile_width;
  2015. /* minimize x in case it got needlessly big */
  2016. *y += *x / pitch_pixels * tile_height;
  2017. *x %= pitch_pixels;
  2018. return new_offset;
  2019. }
  2020. /*
  2021. * Adjust the tile offset by moving the difference into
  2022. * the x/y offsets.
  2023. */
  2024. static u32 intel_adjust_tile_offset(int *x, int *y,
  2025. const struct intel_plane_state *state, int plane,
  2026. u32 old_offset, u32 new_offset)
  2027. {
  2028. const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
  2029. const struct drm_framebuffer *fb = state->base.fb;
  2030. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2031. unsigned int rotation = state->base.rotation;
  2032. unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
  2033. WARN_ON(new_offset > old_offset);
  2034. if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
  2035. unsigned int tile_size, tile_width, tile_height;
  2036. unsigned int pitch_tiles;
  2037. tile_size = intel_tile_size(dev_priv);
  2038. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2039. fb->modifier[plane], cpp);
  2040. if (drm_rotation_90_or_270(rotation)) {
  2041. pitch_tiles = pitch / tile_height;
  2042. swap(tile_width, tile_height);
  2043. } else {
  2044. pitch_tiles = pitch / (tile_width * cpp);
  2045. }
  2046. _intel_adjust_tile_offset(x, y, tile_width, tile_height,
  2047. tile_size, pitch_tiles,
  2048. old_offset, new_offset);
  2049. } else {
  2050. old_offset += *y * pitch + *x * cpp;
  2051. *y = (old_offset - new_offset) / pitch;
  2052. *x = ((old_offset - new_offset) - *y * pitch) / cpp;
  2053. }
  2054. return new_offset;
  2055. }
  2056. /*
  2057. * Computes the linear offset to the base tile and adjusts
  2058. * x, y. bytes per pixel is assumed to be a power-of-two.
  2059. *
  2060. * In the 90/270 rotated case, x and y are assumed
  2061. * to be already rotated to match the rotated GTT view, and
  2062. * pitch is the tile_height aligned framebuffer height.
  2063. *
  2064. * This function is used when computing the derived information
  2065. * under intel_framebuffer, so using any of that information
  2066. * here is not allowed. Anything under drm_framebuffer can be
  2067. * used. This is why the user has to pass in the pitch since it
  2068. * is specified in the rotated orientation.
  2069. */
  2070. static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
  2071. int *x, int *y,
  2072. const struct drm_framebuffer *fb, int plane,
  2073. unsigned int pitch,
  2074. unsigned int rotation,
  2075. u32 alignment)
  2076. {
  2077. uint64_t fb_modifier = fb->modifier[plane];
  2078. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2079. u32 offset, offset_aligned;
  2080. if (alignment)
  2081. alignment--;
  2082. if (fb_modifier != DRM_FORMAT_MOD_NONE) {
  2083. unsigned int tile_size, tile_width, tile_height;
  2084. unsigned int tile_rows, tiles, pitch_tiles;
  2085. tile_size = intel_tile_size(dev_priv);
  2086. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2087. fb_modifier, cpp);
  2088. if (drm_rotation_90_or_270(rotation)) {
  2089. pitch_tiles = pitch / tile_height;
  2090. swap(tile_width, tile_height);
  2091. } else {
  2092. pitch_tiles = pitch / (tile_width * cpp);
  2093. }
  2094. tile_rows = *y / tile_height;
  2095. *y %= tile_height;
  2096. tiles = *x / tile_width;
  2097. *x %= tile_width;
  2098. offset = (tile_rows * pitch_tiles + tiles) * tile_size;
  2099. offset_aligned = offset & ~alignment;
  2100. _intel_adjust_tile_offset(x, y, tile_width, tile_height,
  2101. tile_size, pitch_tiles,
  2102. offset, offset_aligned);
  2103. } else {
  2104. offset = *y * pitch + *x * cpp;
  2105. offset_aligned = offset & ~alignment;
  2106. *y = (offset & alignment) / pitch;
  2107. *x = ((offset & alignment) - *y * pitch) / cpp;
  2108. }
  2109. return offset_aligned;
  2110. }
  2111. u32 intel_compute_tile_offset(int *x, int *y,
  2112. const struct intel_plane_state *state,
  2113. int plane)
  2114. {
  2115. const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
  2116. const struct drm_framebuffer *fb = state->base.fb;
  2117. unsigned int rotation = state->base.rotation;
  2118. int pitch = intel_fb_pitch(fb, plane, rotation);
  2119. u32 alignment;
  2120. /* AUX_DIST needs only 4K alignment */
  2121. if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
  2122. alignment = 4096;
  2123. else
  2124. alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
  2125. return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
  2126. rotation, alignment);
  2127. }
  2128. /* Convert the fb->offset[] linear offset into x/y offsets */
  2129. static void intel_fb_offset_to_xy(int *x, int *y,
  2130. const struct drm_framebuffer *fb, int plane)
  2131. {
  2132. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2133. unsigned int pitch = fb->pitches[plane];
  2134. u32 linear_offset = fb->offsets[plane];
  2135. *y = linear_offset / pitch;
  2136. *x = linear_offset % pitch / cpp;
  2137. }
  2138. static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
  2139. {
  2140. switch (fb_modifier) {
  2141. case I915_FORMAT_MOD_X_TILED:
  2142. return I915_TILING_X;
  2143. case I915_FORMAT_MOD_Y_TILED:
  2144. return I915_TILING_Y;
  2145. default:
  2146. return I915_TILING_NONE;
  2147. }
  2148. }
  2149. static int
  2150. intel_fill_fb_info(struct drm_i915_private *dev_priv,
  2151. struct drm_framebuffer *fb)
  2152. {
  2153. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  2154. struct intel_rotation_info *rot_info = &intel_fb->rot_info;
  2155. u32 gtt_offset_rotated = 0;
  2156. unsigned int max_size = 0;
  2157. uint32_t format = fb->pixel_format;
  2158. int i, num_planes = drm_format_num_planes(format);
  2159. unsigned int tile_size = intel_tile_size(dev_priv);
  2160. for (i = 0; i < num_planes; i++) {
  2161. unsigned int width, height;
  2162. unsigned int cpp, size;
  2163. u32 offset;
  2164. int x, y;
  2165. cpp = drm_format_plane_cpp(format, i);
  2166. width = drm_format_plane_width(fb->width, format, i);
  2167. height = drm_format_plane_height(fb->height, format, i);
  2168. intel_fb_offset_to_xy(&x, &y, fb, i);
  2169. /*
  2170. * The fence (if used) is aligned to the start of the object
  2171. * so having the framebuffer wrap around across the edge of the
  2172. * fenced region doesn't really work. We have no API to configure
  2173. * the fence start offset within the object (nor could we probably
  2174. * on gen2/3). So it's just easier if we just require that the
  2175. * fb layout agrees with the fence layout. We already check that the
  2176. * fb stride matches the fence stride elsewhere.
  2177. */
  2178. if (i915_gem_object_is_tiled(intel_fb->obj) &&
  2179. (x + width) * cpp > fb->pitches[i]) {
  2180. DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
  2181. i, fb->offsets[i]);
  2182. return -EINVAL;
  2183. }
  2184. /*
  2185. * First pixel of the framebuffer from
  2186. * the start of the normal gtt mapping.
  2187. */
  2188. intel_fb->normal[i].x = x;
  2189. intel_fb->normal[i].y = y;
  2190. offset = _intel_compute_tile_offset(dev_priv, &x, &y,
  2191. fb, 0, fb->pitches[i],
  2192. DRM_ROTATE_0, tile_size);
  2193. offset /= tile_size;
  2194. if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
  2195. unsigned int tile_width, tile_height;
  2196. unsigned int pitch_tiles;
  2197. struct drm_rect r;
  2198. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2199. fb->modifier[i], cpp);
  2200. rot_info->plane[i].offset = offset;
  2201. rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
  2202. rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
  2203. rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
  2204. intel_fb->rotated[i].pitch =
  2205. rot_info->plane[i].height * tile_height;
  2206. /* how many tiles does this plane need */
  2207. size = rot_info->plane[i].stride * rot_info->plane[i].height;
  2208. /*
  2209. * If the plane isn't horizontally tile aligned,
  2210. * we need one more tile.
  2211. */
  2212. if (x != 0)
  2213. size++;
  2214. /* rotate the x/y offsets to match the GTT view */
  2215. r.x1 = x;
  2216. r.y1 = y;
  2217. r.x2 = x + width;
  2218. r.y2 = y + height;
  2219. drm_rect_rotate(&r,
  2220. rot_info->plane[i].width * tile_width,
  2221. rot_info->plane[i].height * tile_height,
  2222. DRM_ROTATE_270);
  2223. x = r.x1;
  2224. y = r.y1;
  2225. /* rotate the tile dimensions to match the GTT view */
  2226. pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
  2227. swap(tile_width, tile_height);
  2228. /*
  2229. * We only keep the x/y offsets, so push all of the
  2230. * gtt offset into the x/y offsets.
  2231. */
  2232. _intel_adjust_tile_offset(&x, &y, tile_size,
  2233. tile_width, tile_height, pitch_tiles,
  2234. gtt_offset_rotated * tile_size, 0);
  2235. gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
  2236. /*
  2237. * First pixel of the framebuffer from
  2238. * the start of the rotated gtt mapping.
  2239. */
  2240. intel_fb->rotated[i].x = x;
  2241. intel_fb->rotated[i].y = y;
  2242. } else {
  2243. size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
  2244. x * cpp, tile_size);
  2245. }
  2246. /* how many tiles in total needed in the bo */
  2247. max_size = max(max_size, offset + size);
  2248. }
  2249. if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
  2250. DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
  2251. max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
  2252. return -EINVAL;
  2253. }
  2254. return 0;
  2255. }
  2256. static int i9xx_format_to_fourcc(int format)
  2257. {
  2258. switch (format) {
  2259. case DISPPLANE_8BPP:
  2260. return DRM_FORMAT_C8;
  2261. case DISPPLANE_BGRX555:
  2262. return DRM_FORMAT_XRGB1555;
  2263. case DISPPLANE_BGRX565:
  2264. return DRM_FORMAT_RGB565;
  2265. default:
  2266. case DISPPLANE_BGRX888:
  2267. return DRM_FORMAT_XRGB8888;
  2268. case DISPPLANE_RGBX888:
  2269. return DRM_FORMAT_XBGR8888;
  2270. case DISPPLANE_BGRX101010:
  2271. return DRM_FORMAT_XRGB2101010;
  2272. case DISPPLANE_RGBX101010:
  2273. return DRM_FORMAT_XBGR2101010;
  2274. }
  2275. }
  2276. static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
  2277. {
  2278. switch (format) {
  2279. case PLANE_CTL_FORMAT_RGB_565:
  2280. return DRM_FORMAT_RGB565;
  2281. default:
  2282. case PLANE_CTL_FORMAT_XRGB_8888:
  2283. if (rgb_order) {
  2284. if (alpha)
  2285. return DRM_FORMAT_ABGR8888;
  2286. else
  2287. return DRM_FORMAT_XBGR8888;
  2288. } else {
  2289. if (alpha)
  2290. return DRM_FORMAT_ARGB8888;
  2291. else
  2292. return DRM_FORMAT_XRGB8888;
  2293. }
  2294. case PLANE_CTL_FORMAT_XRGB_2101010:
  2295. if (rgb_order)
  2296. return DRM_FORMAT_XBGR2101010;
  2297. else
  2298. return DRM_FORMAT_XRGB2101010;
  2299. }
  2300. }
  2301. static bool
  2302. intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  2303. struct intel_initial_plane_config *plane_config)
  2304. {
  2305. struct drm_device *dev = crtc->base.dev;
  2306. struct drm_i915_private *dev_priv = to_i915(dev);
  2307. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  2308. struct drm_i915_gem_object *obj = NULL;
  2309. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  2310. struct drm_framebuffer *fb = &plane_config->fb->base;
  2311. u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
  2312. u32 size_aligned = round_up(plane_config->base + plane_config->size,
  2313. PAGE_SIZE);
  2314. size_aligned -= base_aligned;
  2315. if (plane_config->size == 0)
  2316. return false;
  2317. /* If the FB is too big, just don't use it since fbdev is not very
  2318. * important and we should probably use that space with FBC or other
  2319. * features. */
  2320. if (size_aligned * 2 > ggtt->stolen_usable_size)
  2321. return false;
  2322. mutex_lock(&dev->struct_mutex);
  2323. obj = i915_gem_object_create_stolen_for_preallocated(dev,
  2324. base_aligned,
  2325. base_aligned,
  2326. size_aligned);
  2327. if (!obj) {
  2328. mutex_unlock(&dev->struct_mutex);
  2329. return false;
  2330. }
  2331. if (plane_config->tiling == I915_TILING_X)
  2332. obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
  2333. mode_cmd.pixel_format = fb->pixel_format;
  2334. mode_cmd.width = fb->width;
  2335. mode_cmd.height = fb->height;
  2336. mode_cmd.pitches[0] = fb->pitches[0];
  2337. mode_cmd.modifier[0] = fb->modifier[0];
  2338. mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
  2339. if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
  2340. &mode_cmd, obj)) {
  2341. DRM_DEBUG_KMS("intel fb init failed\n");
  2342. goto out_unref_obj;
  2343. }
  2344. mutex_unlock(&dev->struct_mutex);
  2345. DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
  2346. return true;
  2347. out_unref_obj:
  2348. i915_gem_object_put(obj);
  2349. mutex_unlock(&dev->struct_mutex);
  2350. return false;
  2351. }
  2352. /* Update plane->state->fb to match plane->fb after driver-internal updates */
  2353. static void
  2354. update_state_fb(struct drm_plane *plane)
  2355. {
  2356. if (plane->fb == plane->state->fb)
  2357. return;
  2358. if (plane->state->fb)
  2359. drm_framebuffer_unreference(plane->state->fb);
  2360. plane->state->fb = plane->fb;
  2361. if (plane->state->fb)
  2362. drm_framebuffer_reference(plane->state->fb);
  2363. }
  2364. static void
  2365. intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
  2366. struct intel_initial_plane_config *plane_config)
  2367. {
  2368. struct drm_device *dev = intel_crtc->base.dev;
  2369. struct drm_i915_private *dev_priv = to_i915(dev);
  2370. struct drm_crtc *c;
  2371. struct intel_crtc *i;
  2372. struct drm_i915_gem_object *obj;
  2373. struct drm_plane *primary = intel_crtc->base.primary;
  2374. struct drm_plane_state *plane_state = primary->state;
  2375. struct drm_crtc_state *crtc_state = intel_crtc->base.state;
  2376. struct intel_plane *intel_plane = to_intel_plane(primary);
  2377. struct intel_plane_state *intel_state =
  2378. to_intel_plane_state(plane_state);
  2379. struct drm_framebuffer *fb;
  2380. if (!plane_config->fb)
  2381. return;
  2382. if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
  2383. fb = &plane_config->fb->base;
  2384. goto valid_fb;
  2385. }
  2386. kfree(plane_config->fb);
  2387. /*
  2388. * Failed to alloc the obj, check to see if we should share
  2389. * an fb with another CRTC instead
  2390. */
  2391. for_each_crtc(dev, c) {
  2392. i = to_intel_crtc(c);
  2393. if (c == &intel_crtc->base)
  2394. continue;
  2395. if (!i->active)
  2396. continue;
  2397. fb = c->primary->fb;
  2398. if (!fb)
  2399. continue;
  2400. obj = intel_fb_obj(fb);
  2401. if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
  2402. drm_framebuffer_reference(fb);
  2403. goto valid_fb;
  2404. }
  2405. }
  2406. /*
  2407. * We've failed to reconstruct the BIOS FB. Current display state
  2408. * indicates that the primary plane is visible, but has a NULL FB,
  2409. * which will lead to problems later if we don't fix it up. The
  2410. * simplest solution is to just disable the primary plane now and
  2411. * pretend the BIOS never had it enabled.
  2412. */
  2413. to_intel_plane_state(plane_state)->base.visible = false;
  2414. crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
  2415. intel_pre_disable_primary_noatomic(&intel_crtc->base);
  2416. intel_plane->disable_plane(primary, &intel_crtc->base);
  2417. return;
  2418. valid_fb:
  2419. plane_state->src_x = 0;
  2420. plane_state->src_y = 0;
  2421. plane_state->src_w = fb->width << 16;
  2422. plane_state->src_h = fb->height << 16;
  2423. plane_state->crtc_x = 0;
  2424. plane_state->crtc_y = 0;
  2425. plane_state->crtc_w = fb->width;
  2426. plane_state->crtc_h = fb->height;
  2427. intel_state->base.src = drm_plane_state_src(plane_state);
  2428. intel_state->base.dst = drm_plane_state_dest(plane_state);
  2429. obj = intel_fb_obj(fb);
  2430. if (i915_gem_object_is_tiled(obj))
  2431. dev_priv->preserve_bios_swizzle = true;
  2432. drm_framebuffer_reference(fb);
  2433. primary->fb = primary->state->fb = fb;
  2434. primary->crtc = primary->state->crtc = &intel_crtc->base;
  2435. intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
  2436. atomic_or(to_intel_plane(primary)->frontbuffer_bit,
  2437. &obj->frontbuffer_bits);
  2438. }
  2439. static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
  2440. unsigned int rotation)
  2441. {
  2442. int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2443. switch (fb->modifier[plane]) {
  2444. case DRM_FORMAT_MOD_NONE:
  2445. case I915_FORMAT_MOD_X_TILED:
  2446. switch (cpp) {
  2447. case 8:
  2448. return 4096;
  2449. case 4:
  2450. case 2:
  2451. case 1:
  2452. return 8192;
  2453. default:
  2454. MISSING_CASE(cpp);
  2455. break;
  2456. }
  2457. break;
  2458. case I915_FORMAT_MOD_Y_TILED:
  2459. case I915_FORMAT_MOD_Yf_TILED:
  2460. switch (cpp) {
  2461. case 8:
  2462. return 2048;
  2463. case 4:
  2464. return 4096;
  2465. case 2:
  2466. case 1:
  2467. return 8192;
  2468. default:
  2469. MISSING_CASE(cpp);
  2470. break;
  2471. }
  2472. break;
  2473. default:
  2474. MISSING_CASE(fb->modifier[plane]);
  2475. }
  2476. return 2048;
  2477. }
  2478. static int skl_check_main_surface(struct intel_plane_state *plane_state)
  2479. {
  2480. const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
  2481. const struct drm_framebuffer *fb = plane_state->base.fb;
  2482. unsigned int rotation = plane_state->base.rotation;
  2483. int x = plane_state->base.src.x1 >> 16;
  2484. int y = plane_state->base.src.y1 >> 16;
  2485. int w = drm_rect_width(&plane_state->base.src) >> 16;
  2486. int h = drm_rect_height(&plane_state->base.src) >> 16;
  2487. int max_width = skl_max_plane_width(fb, 0, rotation);
  2488. int max_height = 4096;
  2489. u32 alignment, offset, aux_offset = plane_state->aux.offset;
  2490. if (w > max_width || h > max_height) {
  2491. DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
  2492. w, h, max_width, max_height);
  2493. return -EINVAL;
  2494. }
  2495. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2496. offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
  2497. alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
  2498. /*
  2499. * AUX surface offset is specified as the distance from the
  2500. * main surface offset, and it must be non-negative. Make
  2501. * sure that is what we will get.
  2502. */
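/*
* Illustration (hypothetical numbers): with alignment = 0x1000, a main
* surface offset of 0x3000 and aux_offset = 0x1800, the main offset is
* adjusted down to 0x1800 & ~0xfff = 0x1000, keeping aux_offset - offset
* non-negative as required.
*/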
  2503. if (offset > aux_offset)
  2504. offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
  2505. offset, aux_offset & ~(alignment - 1));
  2506. /*
  2507. * When using an X-tiled surface, the plane blows up
  2508. * if the x offset + width exceed the stride.
  2509. *
  2510. * TODO: linear and Y-tiled seem fine, Yf untested,
  2511. */
  2512. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
  2513. int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  2514. while ((x + w) * cpp > fb->pitches[0]) {
  2515. if (offset == 0) {
  2516. DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
  2517. return -EINVAL;
  2518. }
  2519. offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
  2520. offset, offset - alignment);
  2521. }
  2522. }
  2523. plane_state->main.offset = offset;
  2524. plane_state->main.x = x;
  2525. plane_state->main.y = y;
  2526. return 0;
  2527. }
  2528. static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
  2529. {
  2530. const struct drm_framebuffer *fb = plane_state->base.fb;
  2531. unsigned int rotation = plane_state->base.rotation;
  2532. int max_width = skl_max_plane_width(fb, 1, rotation);
  2533. int max_height = 4096;
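/*
* Note: base.src coordinates are 16.16 fixed point; the extra bit in the
* >> 17 shifts below also halves them, matching the 2x2 subsampled
* CbCr (NV12 chroma) plane.
*/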
  2534. int x = plane_state->base.src.x1 >> 17;
  2535. int y = plane_state->base.src.y1 >> 17;
  2536. int w = drm_rect_width(&plane_state->base.src) >> 17;
  2537. int h = drm_rect_height(&plane_state->base.src) >> 17;
  2538. u32 offset;
  2539. intel_add_fb_offsets(&x, &y, plane_state, 1);
  2540. offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
  2541. /* FIXME not quite sure how/if these apply to the chroma plane */
  2542. if (w > max_width || h > max_height) {
  2543. DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
  2544. w, h, max_width, max_height);
  2545. return -EINVAL;
  2546. }
  2547. plane_state->aux.offset = offset;
  2548. plane_state->aux.x = x;
  2549. plane_state->aux.y = y;
  2550. return 0;
  2551. }
  2552. int skl_check_plane_surface(struct intel_plane_state *plane_state)
  2553. {
  2554. const struct drm_framebuffer *fb = plane_state->base.fb;
  2555. unsigned int rotation = plane_state->base.rotation;
  2556. int ret;
  2557. /* Rotate src coordinates to match rotated GTT view */
  2558. if (drm_rotation_90_or_270(rotation))
  2559. drm_rect_rotate(&plane_state->base.src,
  2560. fb->width << 16, fb->height << 16,
  2561. DRM_ROTATE_270);
  2562. /*
  2563. * Handle the AUX surface first since
  2564. * the main surface setup depends on it.
  2565. */
  2566. if (fb->pixel_format == DRM_FORMAT_NV12) {
  2567. ret = skl_check_nv12_aux_surface(plane_state);
  2568. if (ret)
  2569. return ret;
  2570. } else {
  2571. plane_state->aux.offset = ~0xfff;
  2572. plane_state->aux.x = 0;
  2573. plane_state->aux.y = 0;
  2574. }
  2575. ret = skl_check_main_surface(plane_state);
  2576. if (ret)
  2577. return ret;
  2578. return 0;
  2579. }
  2580. static void i9xx_update_primary_plane(struct drm_plane *primary,
  2581. const struct intel_crtc_state *crtc_state,
  2582. const struct intel_plane_state *plane_state)
  2583. {
  2584. struct drm_i915_private *dev_priv = to_i915(primary->dev);
  2585. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2586. struct drm_framebuffer *fb = plane_state->base.fb;
  2587. int plane = intel_crtc->plane;
  2588. u32 linear_offset;
  2589. u32 dspcntr;
  2590. i915_reg_t reg = DSPCNTR(plane);
  2591. unsigned int rotation = plane_state->base.rotation;
  2592. int x = plane_state->base.src.x1 >> 16;
  2593. int y = plane_state->base.src.y1 >> 16;
  2594. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2595. dspcntr |= DISPLAY_PLANE_ENABLE;
  2596. if (INTEL_GEN(dev_priv) < 4) {
  2597. if (intel_crtc->pipe == PIPE_B)
  2598. dspcntr |= DISPPLANE_SEL_PIPE_B;
  2599. /* pipesrc and dspsize control the size that is scaled from,
  2600. * which should always be the user's requested size.
  2601. */
  2602. I915_WRITE(DSPSIZE(plane),
  2603. ((crtc_state->pipe_src_h - 1) << 16) |
  2604. (crtc_state->pipe_src_w - 1));
  2605. I915_WRITE(DSPPOS(plane), 0);
  2606. } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
  2607. I915_WRITE(PRIMSIZE(plane),
  2608. ((crtc_state->pipe_src_h - 1) << 16) |
  2609. (crtc_state->pipe_src_w - 1));
  2610. I915_WRITE(PRIMPOS(plane), 0);
  2611. I915_WRITE(PRIMCNSTALPHA(plane), 0);
  2612. }
  2613. switch (fb->pixel_format) {
  2614. case DRM_FORMAT_C8:
  2615. dspcntr |= DISPPLANE_8BPP;
  2616. break;
  2617. case DRM_FORMAT_XRGB1555:
  2618. dspcntr |= DISPPLANE_BGRX555;
  2619. break;
  2620. case DRM_FORMAT_RGB565:
  2621. dspcntr |= DISPPLANE_BGRX565;
  2622. break;
  2623. case DRM_FORMAT_XRGB8888:
  2624. dspcntr |= DISPPLANE_BGRX888;
  2625. break;
  2626. case DRM_FORMAT_XBGR8888:
  2627. dspcntr |= DISPPLANE_RGBX888;
  2628. break;
  2629. case DRM_FORMAT_XRGB2101010:
  2630. dspcntr |= DISPPLANE_BGRX101010;
  2631. break;
  2632. case DRM_FORMAT_XBGR2101010:
  2633. dspcntr |= DISPPLANE_RGBX101010;
  2634. break;
  2635. default:
  2636. BUG();
  2637. }
  2638. if (INTEL_GEN(dev_priv) >= 4 &&
  2639. fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  2640. dspcntr |= DISPPLANE_TILED;
  2641. if (rotation & DRM_ROTATE_180)
  2642. dspcntr |= DISPPLANE_ROTATE_180;
  2643. if (rotation & DRM_REFLECT_X)
  2644. dspcntr |= DISPPLANE_MIRROR;
  2645. if (IS_G4X(dev_priv))
  2646. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2647. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2648. if (INTEL_GEN(dev_priv) >= 4)
  2649. intel_crtc->dspaddr_offset =
  2650. intel_compute_tile_offset(&x, &y, plane_state, 0);
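/*
* With 180 degree rotation (and X mirroring) the plane is scanned out
* backwards from the programmed offset, so point x/y at the last pixel
* of the source rectangle here.
*/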
  2651. if (rotation & DRM_ROTATE_180) {
  2652. x += crtc_state->pipe_src_w - 1;
  2653. y += crtc_state->pipe_src_h - 1;
  2654. } else if (rotation & DRM_REFLECT_X) {
  2655. x += crtc_state->pipe_src_w - 1;
  2656. }
  2657. linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
  2658. if (INTEL_GEN(dev_priv) < 4)
  2659. intel_crtc->dspaddr_offset = linear_offset;
  2660. intel_crtc->adjusted_x = x;
  2661. intel_crtc->adjusted_y = y;
  2662. I915_WRITE(reg, dspcntr);
  2663. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2664. if (INTEL_GEN(dev_priv) >= 4) {
  2665. I915_WRITE(DSPSURF(plane),
  2666. intel_fb_gtt_offset(fb, rotation) +
  2667. intel_crtc->dspaddr_offset);
  2668. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2669. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2670. } else {
  2671. I915_WRITE(DSPADDR(plane),
  2672. intel_fb_gtt_offset(fb, rotation) +
  2673. intel_crtc->dspaddr_offset);
  2674. }
  2675. POSTING_READ(reg);
  2676. }
  2677. static void i9xx_disable_primary_plane(struct drm_plane *primary,
  2678. struct drm_crtc *crtc)
  2679. {
  2680. struct drm_device *dev = crtc->dev;
  2681. struct drm_i915_private *dev_priv = to_i915(dev);
  2682. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2683. int plane = intel_crtc->plane;
  2684. I915_WRITE(DSPCNTR(plane), 0);
  2685. if (INTEL_INFO(dev_priv)->gen >= 4)
  2686. I915_WRITE(DSPSURF(plane), 0);
  2687. else
  2688. I915_WRITE(DSPADDR(plane), 0);
  2689. POSTING_READ(DSPCNTR(plane));
  2690. }
  2691. static void ironlake_update_primary_plane(struct drm_plane *primary,
  2692. const struct intel_crtc_state *crtc_state,
  2693. const struct intel_plane_state *plane_state)
  2694. {
  2695. struct drm_device *dev = primary->dev;
  2696. struct drm_i915_private *dev_priv = to_i915(dev);
  2697. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2698. struct drm_framebuffer *fb = plane_state->base.fb;
  2699. int plane = intel_crtc->plane;
  2700. u32 linear_offset;
  2701. u32 dspcntr;
  2702. i915_reg_t reg = DSPCNTR(plane);
  2703. unsigned int rotation = plane_state->base.rotation;
  2704. int x = plane_state->base.src.x1 >> 16;
  2705. int y = plane_state->base.src.y1 >> 16;
  2706. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2707. dspcntr |= DISPLAY_PLANE_ENABLE;
  2708. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  2709. dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
  2710. switch (fb->pixel_format) {
  2711. case DRM_FORMAT_C8:
  2712. dspcntr |= DISPPLANE_8BPP;
  2713. break;
  2714. case DRM_FORMAT_RGB565:
  2715. dspcntr |= DISPPLANE_BGRX565;
  2716. break;
  2717. case DRM_FORMAT_XRGB8888:
  2718. dspcntr |= DISPPLANE_BGRX888;
  2719. break;
  2720. case DRM_FORMAT_XBGR8888:
  2721. dspcntr |= DISPPLANE_RGBX888;
  2722. break;
  2723. case DRM_FORMAT_XRGB2101010:
  2724. dspcntr |= DISPPLANE_BGRX101010;
  2725. break;
  2726. case DRM_FORMAT_XBGR2101010:
  2727. dspcntr |= DISPPLANE_RGBX101010;
  2728. break;
  2729. default:
  2730. BUG();
  2731. }
  2732. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  2733. dspcntr |= DISPPLANE_TILED;
  2734. if (rotation & DRM_ROTATE_180)
  2735. dspcntr |= DISPPLANE_ROTATE_180;
  2736. if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
  2737. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2738. intel_add_fb_offsets(&x, &y, plane_state, 0);
  2739. intel_crtc->dspaddr_offset =
  2740. intel_compute_tile_offset(&x, &y, plane_state, 0);
  2741. /* HSW+ does this automagically in hardware */
  2742. if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
  2743. rotation & DRM_ROTATE_180) {
  2744. x += crtc_state->pipe_src_w - 1;
  2745. y += crtc_state->pipe_src_h - 1;
  2746. }
  2747. linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
  2748. intel_crtc->adjusted_x = x;
  2749. intel_crtc->adjusted_y = y;
  2750. I915_WRITE(reg, dspcntr);
  2751. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2752. I915_WRITE(DSPSURF(plane),
  2753. intel_fb_gtt_offset(fb, rotation) +
  2754. intel_crtc->dspaddr_offset);
  2755. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  2756. I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
  2757. } else {
  2758. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2759. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2760. }
  2761. POSTING_READ(reg);
  2762. }
  2763. u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
  2764. uint64_t fb_modifier, uint32_t pixel_format)
  2765. {
  2766. if (fb_modifier == DRM_FORMAT_MOD_NONE) {
  2767. return 64;
  2768. } else {
  2769. int cpp = drm_format_plane_cpp(pixel_format, 0);
  2770. return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  2771. }
  2772. }
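/*
* i.e. linear framebuffers only need 64 byte stride alignment, while tiled
* framebuffers must use a stride that is a multiple of the tile row size
* returned by intel_tile_width_bytes().
*/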
  2773. u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
  2774. unsigned int rotation)
  2775. {
  2776. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2777. struct i915_ggtt_view view;
  2778. struct i915_vma *vma;
  2779. intel_fill_fb_ggtt_view(&view, fb, rotation);
  2780. vma = i915_gem_object_to_ggtt(obj, &view);
  2781. if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
  2782. view.type))
  2783. return -1;
  2784. return i915_ggtt_offset(vma);
  2785. }
  2786. static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
  2787. {
  2788. struct drm_device *dev = intel_crtc->base.dev;
  2789. struct drm_i915_private *dev_priv = to_i915(dev);
  2790. I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
  2791. I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
  2792. I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
  2793. }
  2794. /*
  2795. * This function detaches (aka. unbinds) unused scalers in hardware
  2796. */
  2797. static void skl_detach_scalers(struct intel_crtc *intel_crtc)
  2798. {
  2799. struct intel_crtc_scaler_state *scaler_state;
  2800. int i;
  2801. scaler_state = &intel_crtc->config->scaler_state;
  2802. /* loop through and disable scalers that aren't in use */
  2803. for (i = 0; i < intel_crtc->num_scalers; i++) {
  2804. if (!scaler_state->scalers[i].in_use)
  2805. skl_detach_scaler(intel_crtc, i);
  2806. }
  2807. }
  2808. u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
  2809. unsigned int rotation)
  2810. {
  2811. const struct drm_i915_private *dev_priv = to_i915(fb->dev);
  2812. u32 stride = intel_fb_pitch(fb, plane, rotation);
  2813. /*
2814. * The stride is expressed either in chunks of 64 bytes for
2815. * linear buffers or in number of tiles for tiled buffers.
  2816. */
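/*
* For example (illustrative numbers): a linear fb with a 4096 byte pitch
* gives stride = 4096 / 64 = 64, while an X-tiled fb with the same pitch
* (512 byte wide tile rows) gives stride = 4096 / 512 = 8 tiles.
*/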
  2817. if (drm_rotation_90_or_270(rotation)) {
  2818. int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2819. stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
  2820. } else {
  2821. stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  2822. fb->pixel_format);
  2823. }
  2824. return stride;
  2825. }
  2826. u32 skl_plane_ctl_format(uint32_t pixel_format)
  2827. {
  2828. switch (pixel_format) {
  2829. case DRM_FORMAT_C8:
  2830. return PLANE_CTL_FORMAT_INDEXED;
  2831. case DRM_FORMAT_RGB565:
  2832. return PLANE_CTL_FORMAT_RGB_565;
  2833. case DRM_FORMAT_XBGR8888:
  2834. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
  2835. case DRM_FORMAT_XRGB8888:
  2836. return PLANE_CTL_FORMAT_XRGB_8888;
  2837. /*
2838. * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
  2839. * to be already pre-multiplied. We need to add a knob (or a different
  2840. * DRM_FORMAT) for user-space to configure that.
  2841. */
  2842. case DRM_FORMAT_ABGR8888:
  2843. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
  2844. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2845. case DRM_FORMAT_ARGB8888:
  2846. return PLANE_CTL_FORMAT_XRGB_8888 |
  2847. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2848. case DRM_FORMAT_XRGB2101010:
  2849. return PLANE_CTL_FORMAT_XRGB_2101010;
  2850. case DRM_FORMAT_XBGR2101010:
  2851. return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
  2852. case DRM_FORMAT_YUYV:
  2853. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
  2854. case DRM_FORMAT_YVYU:
  2855. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
  2856. case DRM_FORMAT_UYVY:
  2857. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
  2858. case DRM_FORMAT_VYUY:
  2859. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
  2860. default:
  2861. MISSING_CASE(pixel_format);
  2862. }
  2863. return 0;
  2864. }
  2865. u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
  2866. {
  2867. switch (fb_modifier) {
  2868. case DRM_FORMAT_MOD_NONE:
  2869. break;
  2870. case I915_FORMAT_MOD_X_TILED:
  2871. return PLANE_CTL_TILED_X;
  2872. case I915_FORMAT_MOD_Y_TILED:
  2873. return PLANE_CTL_TILED_Y;
  2874. case I915_FORMAT_MOD_Yf_TILED:
  2875. return PLANE_CTL_TILED_YF;
  2876. default:
  2877. MISSING_CASE(fb_modifier);
  2878. }
  2879. return 0;
  2880. }
  2881. u32 skl_plane_ctl_rotation(unsigned int rotation)
  2882. {
  2883. switch (rotation) {
  2884. case DRM_ROTATE_0:
  2885. break;
  2886. /*
2887. * DRM_ROTATE_ is counter-clockwise, to stay compatible with Xrandr,
2888. * while i915 HW rotation is clockwise; that's why we swap the values here.
  2889. */
  2890. case DRM_ROTATE_90:
  2891. return PLANE_CTL_ROTATE_270;
  2892. case DRM_ROTATE_180:
  2893. return PLANE_CTL_ROTATE_180;
  2894. case DRM_ROTATE_270:
  2895. return PLANE_CTL_ROTATE_90;
  2896. default:
  2897. MISSING_CASE(rotation);
  2898. }
  2899. return 0;
  2900. }
  2901. static void skylake_update_primary_plane(struct drm_plane *plane,
  2902. const struct intel_crtc_state *crtc_state,
  2903. const struct intel_plane_state *plane_state)
  2904. {
  2905. struct drm_device *dev = plane->dev;
  2906. struct drm_i915_private *dev_priv = to_i915(dev);
  2907. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2908. struct drm_framebuffer *fb = plane_state->base.fb;
  2909. int pipe = intel_crtc->pipe;
  2910. u32 plane_ctl;
  2911. unsigned int rotation = plane_state->base.rotation;
  2912. u32 stride = skl_plane_stride(fb, 0, rotation);
  2913. u32 surf_addr = plane_state->main.offset;
  2914. int scaler_id = plane_state->scaler_id;
  2915. int src_x = plane_state->main.x;
  2916. int src_y = plane_state->main.y;
  2917. int src_w = drm_rect_width(&plane_state->base.src) >> 16;
  2918. int src_h = drm_rect_height(&plane_state->base.src) >> 16;
  2919. int dst_x = plane_state->base.dst.x1;
  2920. int dst_y = plane_state->base.dst.y1;
  2921. int dst_w = drm_rect_width(&plane_state->base.dst);
  2922. int dst_h = drm_rect_height(&plane_state->base.dst);
  2923. plane_ctl = PLANE_CTL_ENABLE |
  2924. PLANE_CTL_PIPE_GAMMA_ENABLE |
  2925. PLANE_CTL_PIPE_CSC_ENABLE;
  2926. plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
  2927. plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
  2928. plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
  2929. plane_ctl |= skl_plane_ctl_rotation(rotation);
  2930. /* Sizes are 0 based */
  2931. src_w--;
  2932. src_h--;
  2933. dst_w--;
  2934. dst_h--;
  2935. intel_crtc->dspaddr_offset = surf_addr;
  2936. intel_crtc->adjusted_x = src_x;
  2937. intel_crtc->adjusted_y = src_y;
  2938. I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
  2939. I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
  2940. I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
  2941. I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
  2942. if (scaler_id >= 0) {
  2943. uint32_t ps_ctrl = 0;
  2944. WARN_ON(!dst_w || !dst_h);
  2945. ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
  2946. crtc_state->scaler_state.scalers[scaler_id].mode;
  2947. I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
  2948. I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
  2949. I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
  2950. I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
  2951. I915_WRITE(PLANE_POS(pipe, 0), 0);
  2952. } else {
  2953. I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
  2954. }
  2955. I915_WRITE(PLANE_SURF(pipe, 0),
  2956. intel_fb_gtt_offset(fb, rotation) + surf_addr);
  2957. POSTING_READ(PLANE_SURF(pipe, 0));
  2958. }
  2959. static void skylake_disable_primary_plane(struct drm_plane *primary,
  2960. struct drm_crtc *crtc)
  2961. {
  2962. struct drm_device *dev = crtc->dev;
  2963. struct drm_i915_private *dev_priv = to_i915(dev);
  2964. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2965. int pipe = intel_crtc->pipe;
  2966. I915_WRITE(PLANE_CTL(pipe, 0), 0);
  2967. I915_WRITE(PLANE_SURF(pipe, 0), 0);
  2968. POSTING_READ(PLANE_SURF(pipe, 0));
  2969. }
  2970. /* Assume fb object is pinned & idle & fenced and just update base pointers */
  2971. static int
  2972. intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  2973. int x, int y, enum mode_set_atomic state)
  2974. {
  2975. /* Support for kgdboc is disabled, this needs a major rework. */
  2976. DRM_ERROR("legacy panic handler not supported any more.\n");
  2977. return -ENODEV;
  2978. }
  2979. static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
  2980. {
  2981. struct intel_crtc *crtc;
  2982. for_each_intel_crtc(&dev_priv->drm, crtc)
  2983. intel_finish_page_flip_cs(dev_priv, crtc->pipe);
  2984. }
  2985. static void intel_update_primary_planes(struct drm_device *dev)
  2986. {
  2987. struct drm_crtc *crtc;
  2988. for_each_crtc(dev, crtc) {
  2989. struct intel_plane *plane = to_intel_plane(crtc->primary);
  2990. struct intel_plane_state *plane_state =
  2991. to_intel_plane_state(plane->base.state);
  2992. if (plane_state->base.visible)
  2993. plane->update_plane(&plane->base,
  2994. to_intel_crtc_state(crtc->state),
  2995. plane_state);
  2996. }
  2997. }
  2998. static int
  2999. __intel_display_resume(struct drm_device *dev,
  3000. struct drm_atomic_state *state)
  3001. {
  3002. struct drm_crtc_state *crtc_state;
  3003. struct drm_crtc *crtc;
  3004. int i, ret;
  3005. intel_modeset_setup_hw_state(dev);
  3006. i915_redisable_vga(to_i915(dev));
  3007. if (!state)
  3008. return 0;
  3009. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  3010. /*
  3011. * Force recalculation even if we restore
  3012. * current state. With fast modeset this may not result
  3013. * in a modeset when the state is compatible.
  3014. */
  3015. crtc_state->mode_changed = true;
  3016. }
  3017. /* ignore any reset values/BIOS leftovers in the WM registers */
  3018. to_intel_atomic_state(state)->skip_intermediate_wm = true;
  3019. ret = drm_atomic_commit(state);
  3020. WARN_ON(ret == -EDEADLK);
  3021. return ret;
  3022. }
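/*
* On pre-gen5 platforms (g4x excepted) a full GPU reset also takes the
* display engine down, so the modeset state has to be saved before the
* reset and restored afterwards; see intel_prepare_reset() and
* intel_finish_reset() below.
*/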
  3023. static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
  3024. {
  3025. return intel_has_gpu_reset(dev_priv) &&
  3026. INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
  3027. }
  3028. void intel_prepare_reset(struct drm_i915_private *dev_priv)
  3029. {
  3030. struct drm_device *dev = &dev_priv->drm;
  3031. struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
  3032. struct drm_atomic_state *state;
  3033. int ret;
  3034. /*
  3035. * Need mode_config.mutex so that we don't
  3036. * trample ongoing ->detect() and whatnot.
  3037. */
  3038. mutex_lock(&dev->mode_config.mutex);
  3039. drm_modeset_acquire_init(ctx, 0);
  3040. while (1) {
  3041. ret = drm_modeset_lock_all_ctx(dev, ctx);
  3042. if (ret != -EDEADLK)
  3043. break;
  3044. drm_modeset_backoff(ctx);
  3045. }
3046. /* reset doesn't touch the display, but flips might get nuked anyway. */
  3047. if (!i915.force_reset_modeset_test &&
  3048. !gpu_reset_clobbers_display(dev_priv))
  3049. return;
  3050. /*
  3051. * Disabling the crtcs gracefully seems nicer. Also the
  3052. * g33 docs say we should at least disable all the planes.
  3053. */
  3054. state = drm_atomic_helper_duplicate_state(dev, ctx);
  3055. if (IS_ERR(state)) {
  3056. ret = PTR_ERR(state);
  3057. state = NULL;
  3058. DRM_ERROR("Duplicating state failed with %i\n", ret);
  3059. goto err;
  3060. }
  3061. ret = drm_atomic_helper_disable_all(dev, ctx);
  3062. if (ret) {
  3063. DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  3064. goto err;
  3065. }
  3066. dev_priv->modeset_restore_state = state;
  3067. state->acquire_ctx = ctx;
  3068. return;
  3069. err:
  3070. drm_atomic_state_put(state);
  3071. }
  3072. void intel_finish_reset(struct drm_i915_private *dev_priv)
  3073. {
  3074. struct drm_device *dev = &dev_priv->drm;
  3075. struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
  3076. struct drm_atomic_state *state = dev_priv->modeset_restore_state;
  3077. int ret;
  3078. /*
  3079. * Flips in the rings will be nuked by the reset,
  3080. * so complete all pending flips so that user space
  3081. * will get its events and not get stuck.
  3082. */
  3083. intel_complete_page_flips(dev_priv);
  3084. dev_priv->modeset_restore_state = NULL;
  3085. /* reset doesn't touch the display */
  3086. if (!gpu_reset_clobbers_display(dev_priv)) {
  3087. if (!state) {
  3088. /*
  3089. * Flips in the rings have been nuked by the reset,
  3090. * so update the base address of all primary
3091. * planes to the last fb to make sure we're
  3092. * showing the correct fb after a reset.
  3093. *
  3094. * FIXME: Atomic will make this obsolete since we won't schedule
  3095. * CS-based flips (which might get lost in gpu resets) any more.
  3096. */
  3097. intel_update_primary_planes(dev);
  3098. } else {
  3099. ret = __intel_display_resume(dev, state);
  3100. if (ret)
  3101. DRM_ERROR("Restoring old state failed with %i\n", ret);
  3102. }
  3103. } else {
  3104. /*
  3105. * The display has been reset as well,
3106. * so we need a full re-initialization.
  3107. */
  3108. intel_runtime_pm_disable_interrupts(dev_priv);
  3109. intel_runtime_pm_enable_interrupts(dev_priv);
  3110. intel_pps_unlock_regs_wa(dev_priv);
  3111. intel_modeset_init_hw(dev);
  3112. spin_lock_irq(&dev_priv->irq_lock);
  3113. if (dev_priv->display.hpd_irq_setup)
  3114. dev_priv->display.hpd_irq_setup(dev_priv);
  3115. spin_unlock_irq(&dev_priv->irq_lock);
  3116. ret = __intel_display_resume(dev, state);
  3117. if (ret)
  3118. DRM_ERROR("Restoring old state failed with %i\n", ret);
  3119. intel_hpd_init(dev_priv);
  3120. }
  3121. if (state)
  3122. drm_atomic_state_put(state);
  3123. drm_modeset_drop_locks(ctx);
  3124. drm_modeset_acquire_fini(ctx);
  3125. mutex_unlock(&dev->mode_config.mutex);
  3126. }
  3127. static bool abort_flip_on_reset(struct intel_crtc *crtc)
  3128. {
  3129. struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
  3130. if (i915_reset_in_progress(error))
  3131. return true;
  3132. if (crtc->reset_count != i915_reset_count(error))
  3133. return true;
  3134. return false;
  3135. }
  3136. static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
  3137. {
  3138. struct drm_device *dev = crtc->dev;
  3139. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3140. bool pending;
  3141. if (abort_flip_on_reset(intel_crtc))
  3142. return false;
  3143. spin_lock_irq(&dev->event_lock);
  3144. pending = to_intel_crtc(crtc)->flip_work != NULL;
  3145. spin_unlock_irq(&dev->event_lock);
  3146. return pending;
  3147. }
  3148. static void intel_update_pipe_config(struct intel_crtc *crtc,
  3149. struct intel_crtc_state *old_crtc_state)
  3150. {
  3151. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  3152. struct intel_crtc_state *pipe_config =
  3153. to_intel_crtc_state(crtc->base.state);
  3154. /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
  3155. crtc->base.mode = crtc->base.state->mode;
  3156. DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
  3157. old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
  3158. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  3159. /*
  3160. * Update pipe size and adjust fitter if needed: the reason for this is
  3161. * that in compute_mode_changes we check the native mode (not the pfit
  3162. * mode) to see if we can flip rather than do a full mode set. In the
  3163. * fastboot case, we'll flip, but if we don't update the pipesrc and
  3164. * pfit state, we'll end up with a big fb scanned out into the wrong
  3165. * sized surface.
  3166. */
  3167. I915_WRITE(PIPESRC(crtc->pipe),
  3168. ((pipe_config->pipe_src_w - 1) << 16) |
  3169. (pipe_config->pipe_src_h - 1));
  3170. /* on skylake this is done by detaching scalers */
  3171. if (INTEL_GEN(dev_priv) >= 9) {
  3172. skl_detach_scalers(crtc);
  3173. if (pipe_config->pch_pfit.enabled)
  3174. skylake_pfit_enable(crtc);
  3175. } else if (HAS_PCH_SPLIT(dev_priv)) {
  3176. if (pipe_config->pch_pfit.enabled)
  3177. ironlake_pfit_enable(crtc);
  3178. else if (old_crtc_state->pch_pfit.enabled)
  3179. ironlake_pfit_disable(crtc, true);
  3180. }
  3181. }
  3182. static void intel_fdi_normal_train(struct drm_crtc *crtc)
  3183. {
  3184. struct drm_device *dev = crtc->dev;
  3185. struct drm_i915_private *dev_priv = to_i915(dev);
  3186. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3187. int pipe = intel_crtc->pipe;
  3188. i915_reg_t reg;
  3189. u32 temp;
  3190. /* enable normal train */
  3191. reg = FDI_TX_CTL(pipe);
  3192. temp = I915_READ(reg);
  3193. if (IS_IVYBRIDGE(dev_priv)) {
  3194. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  3195. temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
  3196. } else {
  3197. temp &= ~FDI_LINK_TRAIN_NONE;
  3198. temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
  3199. }
  3200. I915_WRITE(reg, temp);
  3201. reg = FDI_RX_CTL(pipe);
  3202. temp = I915_READ(reg);
  3203. if (HAS_PCH_CPT(dev_priv)) {
  3204. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3205. temp |= FDI_LINK_TRAIN_NORMAL_CPT;
  3206. } else {
  3207. temp &= ~FDI_LINK_TRAIN_NONE;
  3208. temp |= FDI_LINK_TRAIN_NONE;
  3209. }
  3210. I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
  3211. /* wait one idle pattern time */
  3212. POSTING_READ(reg);
  3213. udelay(1000);
  3214. /* IVB wants error correction enabled */
  3215. if (IS_IVYBRIDGE(dev_priv))
  3216. I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
  3217. FDI_FE_ERRC_ENABLE);
  3218. }
  3219. /* The FDI link training functions for ILK/Ibexpeak. */
  3220. static void ironlake_fdi_link_train(struct drm_crtc *crtc)
  3221. {
  3222. struct drm_device *dev = crtc->dev;
  3223. struct drm_i915_private *dev_priv = to_i915(dev);
  3224. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3225. int pipe = intel_crtc->pipe;
  3226. i915_reg_t reg;
  3227. u32 temp, tries;
  3228. /* FDI needs bits from pipe first */
  3229. assert_pipe_enabled(dev_priv, pipe);
3230. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
  3231. for train result */
  3232. reg = FDI_RX_IMR(pipe);
  3233. temp = I915_READ(reg);
  3234. temp &= ~FDI_RX_SYMBOL_LOCK;
  3235. temp &= ~FDI_RX_BIT_LOCK;
  3236. I915_WRITE(reg, temp);
  3237. I915_READ(reg);
  3238. udelay(150);
  3239. /* enable CPU FDI TX and PCH FDI RX */
  3240. reg = FDI_TX_CTL(pipe);
  3241. temp = I915_READ(reg);
  3242. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3243. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3244. temp &= ~FDI_LINK_TRAIN_NONE;
  3245. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3246. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3247. reg = FDI_RX_CTL(pipe);
  3248. temp = I915_READ(reg);
  3249. temp &= ~FDI_LINK_TRAIN_NONE;
  3250. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3251. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3252. POSTING_READ(reg);
  3253. udelay(150);
3254. /* Ironlake workaround, enable clock pointer after FDI enable */
  3255. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  3256. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
  3257. FDI_RX_PHASE_SYNC_POINTER_EN);
  3258. reg = FDI_RX_IIR(pipe);
  3259. for (tries = 0; tries < 5; tries++) {
  3260. temp = I915_READ(reg);
  3261. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3262. if ((temp & FDI_RX_BIT_LOCK)) {
  3263. DRM_DEBUG_KMS("FDI train 1 done.\n");
  3264. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3265. break;
  3266. }
  3267. }
  3268. if (tries == 5)
  3269. DRM_ERROR("FDI train 1 fail!\n");
  3270. /* Train 2 */
  3271. reg = FDI_TX_CTL(pipe);
  3272. temp = I915_READ(reg);
  3273. temp &= ~FDI_LINK_TRAIN_NONE;
  3274. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3275. I915_WRITE(reg, temp);
  3276. reg = FDI_RX_CTL(pipe);
  3277. temp = I915_READ(reg);
  3278. temp &= ~FDI_LINK_TRAIN_NONE;
  3279. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3280. I915_WRITE(reg, temp);
  3281. POSTING_READ(reg);
  3282. udelay(150);
  3283. reg = FDI_RX_IIR(pipe);
  3284. for (tries = 0; tries < 5; tries++) {
  3285. temp = I915_READ(reg);
  3286. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3287. if (temp & FDI_RX_SYMBOL_LOCK) {
  3288. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3289. DRM_DEBUG_KMS("FDI train 2 done.\n");
  3290. break;
  3291. }
  3292. }
  3293. if (tries == 5)
  3294. DRM_ERROR("FDI train 2 fail!\n");
  3295. DRM_DEBUG_KMS("FDI train done\n");
  3296. }
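/*
* Voltage swing / pre-emphasis levels tried in order by the SNB/IVB FDI
* link training loops below.
*/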
  3297. static const int snb_b_fdi_train_param[] = {
  3298. FDI_LINK_TRAIN_400MV_0DB_SNB_B,
  3299. FDI_LINK_TRAIN_400MV_6DB_SNB_B,
  3300. FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
  3301. FDI_LINK_TRAIN_800MV_0DB_SNB_B,
  3302. };
  3303. /* The FDI link training functions for SNB/Cougarpoint. */
  3304. static void gen6_fdi_link_train(struct drm_crtc *crtc)
  3305. {
  3306. struct drm_device *dev = crtc->dev;
  3307. struct drm_i915_private *dev_priv = to_i915(dev);
  3308. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3309. int pipe = intel_crtc->pipe;
  3310. i915_reg_t reg;
  3311. u32 temp, i, retry;
3312. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
  3313. for train result */
  3314. reg = FDI_RX_IMR(pipe);
  3315. temp = I915_READ(reg);
  3316. temp &= ~FDI_RX_SYMBOL_LOCK;
  3317. temp &= ~FDI_RX_BIT_LOCK;
  3318. I915_WRITE(reg, temp);
  3319. POSTING_READ(reg);
  3320. udelay(150);
  3321. /* enable CPU FDI TX and PCH FDI RX */
  3322. reg = FDI_TX_CTL(pipe);
  3323. temp = I915_READ(reg);
  3324. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3325. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3326. temp &= ~FDI_LINK_TRAIN_NONE;
  3327. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3328. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3329. /* SNB-B */
  3330. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  3331. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3332. I915_WRITE(FDI_RX_MISC(pipe),
  3333. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  3334. reg = FDI_RX_CTL(pipe);
  3335. temp = I915_READ(reg);
  3336. if (HAS_PCH_CPT(dev_priv)) {
  3337. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3338. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3339. } else {
  3340. temp &= ~FDI_LINK_TRAIN_NONE;
  3341. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3342. }
  3343. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3344. POSTING_READ(reg);
  3345. udelay(150);
  3346. for (i = 0; i < 4; i++) {
  3347. reg = FDI_TX_CTL(pipe);
  3348. temp = I915_READ(reg);
  3349. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3350. temp |= snb_b_fdi_train_param[i];
  3351. I915_WRITE(reg, temp);
  3352. POSTING_READ(reg);
  3353. udelay(500);
  3354. for (retry = 0; retry < 5; retry++) {
  3355. reg = FDI_RX_IIR(pipe);
  3356. temp = I915_READ(reg);
  3357. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3358. if (temp & FDI_RX_BIT_LOCK) {
  3359. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3360. DRM_DEBUG_KMS("FDI train 1 done.\n");
  3361. break;
  3362. }
  3363. udelay(50);
  3364. }
  3365. if (retry < 5)
  3366. break;
  3367. }
  3368. if (i == 4)
  3369. DRM_ERROR("FDI train 1 fail!\n");
  3370. /* Train 2 */
  3371. reg = FDI_TX_CTL(pipe);
  3372. temp = I915_READ(reg);
  3373. temp &= ~FDI_LINK_TRAIN_NONE;
  3374. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3375. if (IS_GEN6(dev_priv)) {
  3376. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3377. /* SNB-B */
  3378. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  3379. }
  3380. I915_WRITE(reg, temp);
  3381. reg = FDI_RX_CTL(pipe);
  3382. temp = I915_READ(reg);
  3383. if (HAS_PCH_CPT(dev_priv)) {
  3384. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3385. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3386. } else {
  3387. temp &= ~FDI_LINK_TRAIN_NONE;
  3388. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3389. }
  3390. I915_WRITE(reg, temp);
  3391. POSTING_READ(reg);
  3392. udelay(150);
  3393. for (i = 0; i < 4; i++) {
  3394. reg = FDI_TX_CTL(pipe);
  3395. temp = I915_READ(reg);
  3396. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3397. temp |= snb_b_fdi_train_param[i];
  3398. I915_WRITE(reg, temp);
  3399. POSTING_READ(reg);
  3400. udelay(500);
  3401. for (retry = 0; retry < 5; retry++) {
  3402. reg = FDI_RX_IIR(pipe);
  3403. temp = I915_READ(reg);
  3404. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3405. if (temp & FDI_RX_SYMBOL_LOCK) {
  3406. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3407. DRM_DEBUG_KMS("FDI train 2 done.\n");
  3408. break;
  3409. }
  3410. udelay(50);
  3411. }
  3412. if (retry < 5)
  3413. break;
  3414. }
  3415. if (i == 4)
  3416. DRM_ERROR("FDI train 2 fail!\n");
  3417. DRM_DEBUG_KMS("FDI train done.\n");
  3418. }
  3419. /* Manual link training for Ivy Bridge A0 parts */
  3420. static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
  3421. {
  3422. struct drm_device *dev = crtc->dev;
  3423. struct drm_i915_private *dev_priv = to_i915(dev);
  3424. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3425. int pipe = intel_crtc->pipe;
  3426. i915_reg_t reg;
  3427. u32 temp, i, j;
3428. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
  3429. for train result */
  3430. reg = FDI_RX_IMR(pipe);
  3431. temp = I915_READ(reg);
  3432. temp &= ~FDI_RX_SYMBOL_LOCK;
  3433. temp &= ~FDI_RX_BIT_LOCK;
  3434. I915_WRITE(reg, temp);
  3435. POSTING_READ(reg);
  3436. udelay(150);
  3437. DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
  3438. I915_READ(FDI_RX_IIR(pipe)));
  3439. /* Try each vswing and preemphasis setting twice before moving on */
  3440. for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
  3441. /* disable first in case we need to retry */
  3442. reg = FDI_TX_CTL(pipe);
  3443. temp = I915_READ(reg);
  3444. temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
  3445. temp &= ~FDI_TX_ENABLE;
  3446. I915_WRITE(reg, temp);
  3447. reg = FDI_RX_CTL(pipe);
  3448. temp = I915_READ(reg);
  3449. temp &= ~FDI_LINK_TRAIN_AUTO;
  3450. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3451. temp &= ~FDI_RX_ENABLE;
  3452. I915_WRITE(reg, temp);
  3453. /* enable CPU FDI TX and PCH FDI RX */
  3454. reg = FDI_TX_CTL(pipe);
  3455. temp = I915_READ(reg);
  3456. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3457. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3458. temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
  3459. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3460. temp |= snb_b_fdi_train_param[j/2];
  3461. temp |= FDI_COMPOSITE_SYNC;
  3462. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3463. I915_WRITE(FDI_RX_MISC(pipe),
  3464. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  3465. reg = FDI_RX_CTL(pipe);
  3466. temp = I915_READ(reg);
  3467. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3468. temp |= FDI_COMPOSITE_SYNC;
  3469. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3470. POSTING_READ(reg);
  3471. udelay(1); /* should be 0.5us */
  3472. for (i = 0; i < 4; i++) {
  3473. reg = FDI_RX_IIR(pipe);
  3474. temp = I915_READ(reg);
  3475. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3476. if (temp & FDI_RX_BIT_LOCK ||
  3477. (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
  3478. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3479. DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
  3480. i);
  3481. break;
  3482. }
  3483. udelay(1); /* should be 0.5us */
  3484. }
  3485. if (i == 4) {
  3486. DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
  3487. continue;
  3488. }
  3489. /* Train 2 */
  3490. reg = FDI_TX_CTL(pipe);
  3491. temp = I915_READ(reg);
  3492. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  3493. temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
  3494. I915_WRITE(reg, temp);
  3495. reg = FDI_RX_CTL(pipe);
  3496. temp = I915_READ(reg);
  3497. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3498. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3499. I915_WRITE(reg, temp);
  3500. POSTING_READ(reg);
  3501. udelay(2); /* should be 1.5us */
  3502. for (i = 0; i < 4; i++) {
  3503. reg = FDI_RX_IIR(pipe);
  3504. temp = I915_READ(reg);
  3505. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3506. if (temp & FDI_RX_SYMBOL_LOCK ||
  3507. (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
  3508. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3509. DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
  3510. i);
  3511. goto train_done;
  3512. }
  3513. udelay(2); /* should be 1.5us */
  3514. }
  3515. if (i == 4)
  3516. DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
  3517. }
  3518. train_done:
  3519. DRM_DEBUG_KMS("FDI train done.\n");
  3520. }
  3521. static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
  3522. {
  3523. struct drm_device *dev = intel_crtc->base.dev;
  3524. struct drm_i915_private *dev_priv = to_i915(dev);
  3525. int pipe = intel_crtc->pipe;
  3526. i915_reg_t reg;
  3527. u32 temp;
  3528. /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
  3529. reg = FDI_RX_CTL(pipe);
  3530. temp = I915_READ(reg);
  3531. temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
  3532. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3533. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3534. I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
  3535. POSTING_READ(reg);
  3536. udelay(200);
  3537. /* Switch from Rawclk to PCDclk */
  3538. temp = I915_READ(reg);
  3539. I915_WRITE(reg, temp | FDI_PCDCLK);
  3540. POSTING_READ(reg);
  3541. udelay(200);
  3542. /* Enable CPU FDI TX PLL, always on for Ironlake */
  3543. reg = FDI_TX_CTL(pipe);
  3544. temp = I915_READ(reg);
  3545. if ((temp & FDI_TX_PLL_ENABLE) == 0) {
  3546. I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
  3547. POSTING_READ(reg);
  3548. udelay(100);
  3549. }
  3550. }
  3551. static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
  3552. {
  3553. struct drm_device *dev = intel_crtc->base.dev;
  3554. struct drm_i915_private *dev_priv = to_i915(dev);
  3555. int pipe = intel_crtc->pipe;
  3556. i915_reg_t reg;
  3557. u32 temp;
  3558. /* Switch from PCDclk to Rawclk */
  3559. reg = FDI_RX_CTL(pipe);
  3560. temp = I915_READ(reg);
  3561. I915_WRITE(reg, temp & ~FDI_PCDCLK);
  3562. /* Disable CPU FDI TX PLL */
  3563. reg = FDI_TX_CTL(pipe);
  3564. temp = I915_READ(reg);
  3565. I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
  3566. POSTING_READ(reg);
  3567. udelay(100);
  3568. reg = FDI_RX_CTL(pipe);
  3569. temp = I915_READ(reg);
  3570. I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
  3571. /* Wait for the clocks to turn off. */
  3572. POSTING_READ(reg);
  3573. udelay(100);
  3574. }
  3575. static void ironlake_fdi_disable(struct drm_crtc *crtc)
  3576. {
  3577. struct drm_device *dev = crtc->dev;
  3578. struct drm_i915_private *dev_priv = to_i915(dev);
  3579. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3580. int pipe = intel_crtc->pipe;
  3581. i915_reg_t reg;
  3582. u32 temp;
  3583. /* disable CPU FDI tx and PCH FDI rx */
  3584. reg = FDI_TX_CTL(pipe);
  3585. temp = I915_READ(reg);
  3586. I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
  3587. POSTING_READ(reg);
  3588. reg = FDI_RX_CTL(pipe);
  3589. temp = I915_READ(reg);
  3590. temp &= ~(0x7 << 16);
  3591. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3592. I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
  3593. POSTING_READ(reg);
  3594. udelay(100);
  3595. /* Ironlake workaround, disable clock pointer after downing FDI */
  3596. if (HAS_PCH_IBX(dev_priv))
  3597. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  3598. /* still set train pattern 1 */
  3599. reg = FDI_TX_CTL(pipe);
  3600. temp = I915_READ(reg);
  3601. temp &= ~FDI_LINK_TRAIN_NONE;
  3602. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3603. I915_WRITE(reg, temp);
  3604. reg = FDI_RX_CTL(pipe);
  3605. temp = I915_READ(reg);
  3606. if (HAS_PCH_CPT(dev_priv)) {
  3607. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3608. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3609. } else {
  3610. temp &= ~FDI_LINK_TRAIN_NONE;
  3611. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3612. }
  3613. /* BPC in FDI rx is consistent with that in PIPECONF */
  3614. temp &= ~(0x07 << 16);
  3615. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3616. I915_WRITE(reg, temp);
  3617. POSTING_READ(reg);
  3618. udelay(100);
  3619. }
  3620. bool intel_has_pending_fb_unpin(struct drm_device *dev)
  3621. {
  3622. struct drm_i915_private *dev_priv = to_i915(dev);
  3623. struct intel_crtc *crtc;
  3624. /* Note that we don't need to be called with mode_config.lock here
  3625. * as our list of CRTC objects is static for the lifetime of the
  3626. * device and so cannot disappear as we iterate. Similarly, we can
  3627. * happily treat the predicates as racy, atomic checks as userspace
3628. * cannot claim and pin a new fb without at least acquiring the
  3629. * struct_mutex and so serialising with us.
  3630. */
  3631. for_each_intel_crtc(dev, crtc) {
  3632. if (atomic_read(&crtc->unpin_work_count) == 0)
  3633. continue;
  3634. if (crtc->flip_work)
  3635. intel_wait_for_vblank(dev_priv, crtc->pipe);
  3636. return true;
  3637. }
  3638. return false;
  3639. }
  3640. static void page_flip_completed(struct intel_crtc *intel_crtc)
  3641. {
  3642. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  3643. struct intel_flip_work *work = intel_crtc->flip_work;
  3644. intel_crtc->flip_work = NULL;
  3645. if (work->event)
  3646. drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
  3647. drm_crtc_vblank_put(&intel_crtc->base);
  3648. wake_up_all(&dev_priv->pending_flip_queue);
  3649. queue_work(dev_priv->wq, &work->unpin_work);
  3650. trace_i915_flip_complete(intel_crtc->plane,
  3651. work->pending_flip_obj);
  3652. }
  3653. static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
  3654. {
  3655. struct drm_device *dev = crtc->dev;
  3656. struct drm_i915_private *dev_priv = to_i915(dev);
  3657. long ret;
  3658. WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
  3659. ret = wait_event_interruptible_timeout(
  3660. dev_priv->pending_flip_queue,
  3661. !intel_crtc_has_pending_flip(crtc),
  3662. 60*HZ);
  3663. if (ret < 0)
  3664. return ret;
  3665. if (ret == 0) {
  3666. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3667. struct intel_flip_work *work;
  3668. spin_lock_irq(&dev->event_lock);
  3669. work = intel_crtc->flip_work;
  3670. if (work && !is_mmio_work(work)) {
  3671. WARN_ONCE(1, "Removing stuck page flip\n");
  3672. page_flip_completed(intel_crtc);
  3673. }
  3674. spin_unlock_irq(&dev->event_lock);
  3675. }
  3676. return 0;
  3677. }
  3678. void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
  3679. {
  3680. u32 temp;
  3681. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
  3682. mutex_lock(&dev_priv->sb_lock);
  3683. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3684. temp |= SBI_SSCCTL_DISABLE;
  3685. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3686. mutex_unlock(&dev_priv->sb_lock);
  3687. }
  3688. /* Program iCLKIP clock to the desired frequency */
  3689. static void lpt_program_iclkip(struct drm_crtc *crtc)
  3690. {
  3691. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  3692. int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
  3693. u32 divsel, phaseinc, auxdiv, phasedir = 0;
  3694. u32 temp;
  3695. lpt_disable_iclkip(dev_priv);
  3696. /* The iCLK virtual clock root frequency is in MHz,
3697. * but the adjusted_mode->crtc_clock is in kHz. To get the
3698. * divisors, it is necessary to divide one by the other, so we
3699. * convert the virtual clock precision to kHz here for higher
  3700. * precision.
  3701. */
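/*
* For example (illustrative numbers): with crtc_clock = 108000 kHz and
* auxdiv = 0, desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
* giving divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
*/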
  3702. for (auxdiv = 0; auxdiv < 2; auxdiv++) {
  3703. u32 iclk_virtual_root_freq = 172800 * 1000;
  3704. u32 iclk_pi_range = 64;
  3705. u32 desired_divisor;
  3706. desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3707. clock << auxdiv);
  3708. divsel = (desired_divisor / iclk_pi_range) - 2;
  3709. phaseinc = desired_divisor % iclk_pi_range;
  3710. /*
  3711. * Near 20MHz is a corner case which is
  3712. * out of range for the 7-bit divisor
  3713. */
  3714. if (divsel <= 0x7f)
  3715. break;
  3716. }
  3717. /* This should not happen with any sane values */
  3718. WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
  3719. ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
  3720. WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
  3721. ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
  3722. DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
  3723. clock,
  3724. auxdiv,
  3725. divsel,
  3726. phasedir,
  3727. phaseinc);
  3728. mutex_lock(&dev_priv->sb_lock);
  3729. /* Program SSCDIVINTPHASE6 */
  3730. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3731. temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
  3732. temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
  3733. temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
  3734. temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
  3735. temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
  3736. temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
  3737. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
  3738. /* Program SSCAUXDIV */
  3739. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3740. temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
  3741. temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
  3742. intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
  3743. /* Enable modulator and associated divider */
  3744. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3745. temp &= ~SBI_SSCCTL_DISABLE;
  3746. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3747. mutex_unlock(&dev_priv->sb_lock);
  3748. /* Wait for initialization time */
  3749. udelay(24);
  3750. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
  3751. }
  3752. int lpt_get_iclkip(struct drm_i915_private *dev_priv)
  3753. {
  3754. u32 divsel, phaseinc, auxdiv;
  3755. u32 iclk_virtual_root_freq = 172800 * 1000;
  3756. u32 iclk_pi_range = 64;
  3757. u32 desired_divisor;
  3758. u32 temp;
  3759. if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
  3760. return 0;
  3761. mutex_lock(&dev_priv->sb_lock);
  3762. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3763. if (temp & SBI_SSCCTL_DISABLE) {
  3764. mutex_unlock(&dev_priv->sb_lock);
  3765. return 0;
  3766. }
  3767. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3768. divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
  3769. SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
  3770. phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
  3771. SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
  3772. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3773. auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
  3774. SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
  3775. mutex_unlock(&dev_priv->sb_lock);
  3776. desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
  3777. return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3778. desired_divisor << auxdiv);
  3779. }
  3780. static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
  3781. enum pipe pch_transcoder)
  3782. {
  3783. struct drm_device *dev = crtc->base.dev;
  3784. struct drm_i915_private *dev_priv = to_i915(dev);
  3785. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  3786. I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
  3787. I915_READ(HTOTAL(cpu_transcoder)));
  3788. I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
  3789. I915_READ(HBLANK(cpu_transcoder)));
  3790. I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
  3791. I915_READ(HSYNC(cpu_transcoder)));
  3792. I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
  3793. I915_READ(VTOTAL(cpu_transcoder)));
  3794. I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
  3795. I915_READ(VBLANK(cpu_transcoder)));
  3796. I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
  3797. I915_READ(VSYNC(cpu_transcoder)));
  3798. I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
  3799. I915_READ(VSYNCSHIFT(cpu_transcoder)));
  3800. }
  3801. static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
  3802. {
  3803. struct drm_i915_private *dev_priv = to_i915(dev);
  3804. uint32_t temp;
  3805. temp = I915_READ(SOUTH_CHICKEN1);
  3806. if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
  3807. return;
  3808. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
  3809. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
  3810. temp &= ~FDI_BC_BIFURCATION_SELECT;
  3811. if (enable)
  3812. temp |= FDI_BC_BIFURCATION_SELECT;
  3813. DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
  3814. I915_WRITE(SOUTH_CHICKEN1, temp);
  3815. POSTING_READ(SOUTH_CHICKEN1);
  3816. }
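/*
* FDI lanes B and C can be shared: pipe B needs the bifurcation disabled
* when it uses more than two FDI lanes, otherwise the B/C lanes are split
* so that pipe C can drive its own FDI link.
*/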
  3817. static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
  3818. {
  3819. struct drm_device *dev = intel_crtc->base.dev;
  3820. switch (intel_crtc->pipe) {
  3821. case PIPE_A:
  3822. break;
  3823. case PIPE_B:
  3824. if (intel_crtc->config->fdi_lanes > 2)
  3825. cpt_set_fdi_bc_bifurcation(dev, false);
  3826. else
  3827. cpt_set_fdi_bc_bifurcation(dev, true);
  3828. break;
  3829. case PIPE_C:
  3830. cpt_set_fdi_bc_bifurcation(dev, true);
  3831. break;
  3832. default:
  3833. BUG();
  3834. }
  3835. }
  3836. /* Return which DP Port should be selected for Transcoder DP control */
  3837. static enum port
  3838. intel_trans_dp_port_sel(struct drm_crtc *crtc)
  3839. {
  3840. struct drm_device *dev = crtc->dev;
  3841. struct intel_encoder *encoder;
  3842. for_each_encoder_on_crtc(dev, crtc, encoder) {
  3843. if (encoder->type == INTEL_OUTPUT_DP ||
  3844. encoder->type == INTEL_OUTPUT_EDP)
  3845. return enc_to_dig_port(&encoder->base)->port;
  3846. }
  3847. return -1;
  3848. }
  3849. /*
  3850. * Enable PCH resources required for PCH ports:
  3851. * - PCH PLLs
  3852. * - FDI training & RX/TX
  3853. * - update transcoder timings
  3854. * - DP transcoding bits
  3855. * - transcoder
  3856. */
  3857. static void ironlake_pch_enable(struct drm_crtc *crtc)
  3858. {
  3859. struct drm_device *dev = crtc->dev;
  3860. struct drm_i915_private *dev_priv = to_i915(dev);
  3861. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3862. int pipe = intel_crtc->pipe;
  3863. u32 temp;
  3864. assert_pch_transcoder_disabled(dev_priv, pipe);
  3865. if (IS_IVYBRIDGE(dev_priv))
  3866. ivybridge_update_fdi_bc_bifurcation(intel_crtc);
  3867. /* Write the TU size bits before fdi link training, so that error
  3868. * detection works. */
  3869. I915_WRITE(FDI_RX_TUSIZE1(pipe),
  3870. I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
  3871. /* For PCH output, training FDI link */
  3872. dev_priv->display.fdi_link_train(crtc);
  3873. /* We need to program the right clock selection before writing the pixel
3874. * multiplier into the DPLL. */
  3875. if (HAS_PCH_CPT(dev_priv)) {
  3876. u32 sel;
  3877. temp = I915_READ(PCH_DPLL_SEL);
  3878. temp |= TRANS_DPLL_ENABLE(pipe);
  3879. sel = TRANS_DPLLB_SEL(pipe);
  3880. if (intel_crtc->config->shared_dpll ==
  3881. intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
  3882. temp |= sel;
  3883. else
  3884. temp &= ~sel;
  3885. I915_WRITE(PCH_DPLL_SEL, temp);
  3886. }
3887. /* XXX: PCH PLLs can be enabled any time before we enable the PCH
3888. * transcoder, and we actually should do this to not upset any PCH
3889. * transcoder that already uses the clock when we share it.
  3890. *
  3891. * Note that enable_shared_dpll tries to do the right thing, but
  3892. * get_shared_dpll unconditionally resets the pll - we need that to have
  3893. * the right LVDS enable sequence. */
  3894. intel_enable_shared_dpll(intel_crtc);
  3895. /* set transcoder timing, panel must allow it */
  3896. assert_panel_unlocked(dev_priv, pipe);
  3897. ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
  3898. intel_fdi_normal_train(crtc);
  3899. /* For PCH DP, enable TRANS_DP_CTL */
  3900. if (HAS_PCH_CPT(dev_priv) &&
  3901. intel_crtc_has_dp_encoder(intel_crtc->config)) {
  3902. const struct drm_display_mode *adjusted_mode =
  3903. &intel_crtc->config->base.adjusted_mode;
  3904. u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
  3905. i915_reg_t reg = TRANS_DP_CTL(pipe);
  3906. temp = I915_READ(reg);
  3907. temp &= ~(TRANS_DP_PORT_SEL_MASK |
  3908. TRANS_DP_SYNC_MASK |
  3909. TRANS_DP_BPC_MASK);
  3910. temp |= TRANS_DP_OUTPUT_ENABLE;
  3911. temp |= bpc << 9; /* same format but at 11:9 */
  3912. if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  3913. temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
  3914. if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  3915. temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
  3916. switch (intel_trans_dp_port_sel(crtc)) {
  3917. case PORT_B:
  3918. temp |= TRANS_DP_PORT_SEL_B;
  3919. break;
  3920. case PORT_C:
  3921. temp |= TRANS_DP_PORT_SEL_C;
  3922. break;
  3923. case PORT_D:
  3924. temp |= TRANS_DP_PORT_SEL_D;
  3925. break;
  3926. default:
  3927. BUG();
  3928. }
  3929. I915_WRITE(reg, temp);
  3930. }
  3931. ironlake_enable_pch_transcoder(dev_priv, pipe);
  3932. }
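/*
 * LPT exposes a single PCH transcoder, clocked via iclkip, so the
 * transcoder/pipe A values below are fixed rather than per-pipe.
 */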
  3933. static void lpt_pch_enable(struct drm_crtc *crtc)
  3934. {
  3935. struct drm_device *dev = crtc->dev;
  3936. struct drm_i915_private *dev_priv = to_i915(dev);
  3937. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3938. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  3939. assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
  3940. lpt_program_iclkip(crtc);
  3941. /* Set transcoder timing. */
  3942. ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
  3943. lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
  3944. }
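/*
 * Post-modeset sanity check on CPT: sample the pipe scanline counter
 * (PIPEDSL) and complain if it never advances within the timeout, i.e.
 * the pipe appears stuck after the mode set.
 */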
  3945. static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  3946. {
  3947. struct drm_i915_private *dev_priv = to_i915(dev);
  3948. i915_reg_t dslreg = PIPEDSL(pipe);
  3949. u32 temp;
  3950. temp = I915_READ(dslreg);
  3951. udelay(500);
  3952. if (wait_for(I915_READ(dslreg) != temp, 5)) {
  3953. if (wait_for(I915_READ(dslreg) != temp, 5))
  3954. DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
  3955. }
  3956. }
  3957. static int
  3958. skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  3959. unsigned scaler_user, int *scaler_id, unsigned int rotation,
  3960. int src_w, int src_h, int dst_w, int dst_h)
  3961. {
  3962. struct intel_crtc_scaler_state *scaler_state =
  3963. &crtc_state->scaler_state;
  3964. struct intel_crtc *intel_crtc =
  3965. to_intel_crtc(crtc_state->base.crtc);
  3966. int need_scaling;
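/*
 * With a 90/270 degree rotation the source is scanned out sideways, so
 * compare the swapped source dimensions against the destination when
 * deciding whether scaling is needed.
 */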
  3967. need_scaling = drm_rotation_90_or_270(rotation) ?
  3968. (src_h != dst_w || src_w != dst_h):
  3969. (src_w != dst_w || src_h != dst_h);
  3970. /*
3971. * if the plane is being disabled, the scaler is no longer required, or force detach is requested
3972. * - free the scaler bound to this plane/crtc
  3973. * - in order to do this, update crtc->scaler_usage
  3974. *
  3975. * Here scaler state in crtc_state is set free so that
3976. * scaler can be assigned to another user. Actual register
  3977. * update to free the scaler is done in plane/panel-fit programming.
  3978. * For this purpose crtc/plane_state->scaler_id isn't reset here.
  3979. */
  3980. if (force_detach || !need_scaling) {
  3981. if (*scaler_id >= 0) {
  3982. scaler_state->scaler_users &= ~(1 << scaler_user);
  3983. scaler_state->scalers[*scaler_id].in_use = 0;
  3984. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  3985. "Staged freeing scaler id %d scaler_users = 0x%x\n",
  3986. intel_crtc->pipe, scaler_user, *scaler_id,
  3987. scaler_state->scaler_users);
  3988. *scaler_id = -1;
  3989. }
  3990. return 0;
  3991. }
  3992. /* range checks */
  3993. if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
  3994. dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
  3995. src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
  3996. dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
  3997. DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
  3998. "size is out of scaler range\n",
  3999. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
  4000. return -EINVAL;
  4001. }
  4002. /* mark this plane as a scaler user in crtc_state */
  4003. scaler_state->scaler_users |= (1 << scaler_user);
  4004. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  4005. "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
  4006. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
  4007. scaler_state->scaler_users);
  4008. return 0;
  4009. }
  4010. /**
  4011. * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
  4012. *
4013. * @state: crtc state whose scaler state is to be updated
  4014. *
  4015. * Return
  4016. * 0 - scaler_usage updated successfully
  4017. * error - requested scaling cannot be supported or other error condition
  4018. */
  4019. int skl_update_scaler_crtc(struct intel_crtc_state *state)
  4020. {
  4021. const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
  4022. return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
  4023. &state->scaler_state.scaler_id, DRM_ROTATE_0,
  4024. state->pipe_src_w, state->pipe_src_h,
  4025. adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
  4026. }
  4027. /**
  4028. * skl_update_scaler_plane - Stages update to scaler state for a given plane.
  4029. *
4030. * @crtc_state: crtc state whose scaler state is to be updated
  4031. * @plane_state: atomic plane state to update
  4032. *
  4033. * Return
  4034. * 0 - scaler_usage updated successfully
  4035. * error - requested scaling cannot be supported or other error condition
  4036. */
  4037. static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
  4038. struct intel_plane_state *plane_state)
  4039. {
  4040. struct intel_plane *intel_plane =
  4041. to_intel_plane(plane_state->base.plane);
  4042. struct drm_framebuffer *fb = plane_state->base.fb;
  4043. int ret;
  4044. bool force_detach = !fb || !plane_state->base.visible;
  4045. ret = skl_update_scaler(crtc_state, force_detach,
  4046. drm_plane_index(&intel_plane->base),
  4047. &plane_state->scaler_id,
  4048. plane_state->base.rotation,
  4049. drm_rect_width(&plane_state->base.src) >> 16,
  4050. drm_rect_height(&plane_state->base.src) >> 16,
  4051. drm_rect_width(&plane_state->base.dst),
  4052. drm_rect_height(&plane_state->base.dst));
  4053. if (ret || plane_state->scaler_id < 0)
  4054. return ret;
  4055. /* check colorkey */
  4056. if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
  4057. DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
  4058. intel_plane->base.base.id,
  4059. intel_plane->base.name);
  4060. return -EINVAL;
  4061. }
  4062. /* Check src format */
  4063. switch (fb->pixel_format) {
  4064. case DRM_FORMAT_RGB565:
  4065. case DRM_FORMAT_XBGR8888:
  4066. case DRM_FORMAT_XRGB8888:
  4067. case DRM_FORMAT_ABGR8888:
  4068. case DRM_FORMAT_ARGB8888:
  4069. case DRM_FORMAT_XRGB2101010:
  4070. case DRM_FORMAT_XBGR2101010:
  4071. case DRM_FORMAT_YUYV:
  4072. case DRM_FORMAT_YVYU:
  4073. case DRM_FORMAT_UYVY:
  4074. case DRM_FORMAT_VYUY:
  4075. break;
  4076. default:
  4077. DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
  4078. intel_plane->base.base.id, intel_plane->base.name,
  4079. fb->base.id, fb->pixel_format);
  4080. return -EINVAL;
  4081. }
  4082. return 0;
  4083. }
  4084. static void skylake_scaler_disable(struct intel_crtc *crtc)
  4085. {
  4086. int i;
  4087. for (i = 0; i < crtc->num_scalers; i++)
  4088. skl_detach_scaler(crtc, i);
  4089. }
  4090. static void skylake_pfit_enable(struct intel_crtc *crtc)
  4091. {
  4092. struct drm_device *dev = crtc->base.dev;
  4093. struct drm_i915_private *dev_priv = to_i915(dev);
  4094. int pipe = crtc->pipe;
  4095. struct intel_crtc_scaler_state *scaler_state =
  4096. &crtc->config->scaler_state;
  4097. DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
  4098. if (crtc->config->pch_pfit.enabled) {
  4099. int id;
  4100. if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
  4101. DRM_ERROR("Requesting pfit without getting a scaler first\n");
  4102. return;
  4103. }
  4104. id = scaler_state->scaler_id;
  4105. I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
  4106. PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
  4107. I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
  4108. I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
  4109. DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
  4110. }
  4111. }
  4112. static void ironlake_pfit_enable(struct intel_crtc *crtc)
  4113. {
  4114. struct drm_device *dev = crtc->base.dev;
  4115. struct drm_i915_private *dev_priv = to_i915(dev);
  4116. int pipe = crtc->pipe;
  4117. if (crtc->config->pch_pfit.enabled) {
  4118. /* Force use of hard-coded filter coefficients
  4119. * as some pre-programmed values are broken,
  4120. * e.g. x201.
  4121. */
  4122. if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
  4123. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
  4124. PF_PIPE_SEL_IVB(pipe));
  4125. else
  4126. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
  4127. I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
  4128. I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
  4129. }
  4130. }
  4131. void hsw_enable_ips(struct intel_crtc *crtc)
  4132. {
  4133. struct drm_device *dev = crtc->base.dev;
  4134. struct drm_i915_private *dev_priv = to_i915(dev);
  4135. if (!crtc->config->ips_enabled)
  4136. return;
  4137. /*
4138. * We can only enable IPS after we enable a plane and wait for a vblank.
  4139. * This function is called from post_plane_update, which is run after
  4140. * a vblank wait.
  4141. */
  4142. assert_plane_enabled(dev_priv, crtc->plane);
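/*
 * BDW toggles IPS through the pcode DISPLAY_IPS_CONTROL mailbox, whereas
 * HSW (the else branch) writes IPS_CTL directly.
 */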
  4143. if (IS_BROADWELL(dev_priv)) {
  4144. mutex_lock(&dev_priv->rps.hw_lock);
  4145. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
  4146. mutex_unlock(&dev_priv->rps.hw_lock);
4147. /* Quoting Art Runyan: "it's not safe to expect any particular
  4148. * value in IPS_CTL bit 31 after enabling IPS through the
  4149. * mailbox." Moreover, the mailbox may return a bogus state,
  4150. * so we need to just enable it and continue on.
  4151. */
  4152. } else {
  4153. I915_WRITE(IPS_CTL, IPS_ENABLE);
  4154. /* The bit only becomes 1 in the next vblank, so this wait here
  4155. * is essentially intel_wait_for_vblank. If we don't have this
  4156. * and don't wait for vblanks until the end of crtc_enable, then
  4157. * the HW state readout code will complain that the expected
  4158. * IPS_CTL value is not the one we read. */
  4159. if (intel_wait_for_register(dev_priv,
  4160. IPS_CTL, IPS_ENABLE, IPS_ENABLE,
  4161. 50))
  4162. DRM_ERROR("Timed out waiting for IPS enable\n");
  4163. }
  4164. }
  4165. void hsw_disable_ips(struct intel_crtc *crtc)
  4166. {
  4167. struct drm_device *dev = crtc->base.dev;
  4168. struct drm_i915_private *dev_priv = to_i915(dev);
  4169. if (!crtc->config->ips_enabled)
  4170. return;
  4171. assert_plane_enabled(dev_priv, crtc->plane);
  4172. if (IS_BROADWELL(dev_priv)) {
  4173. mutex_lock(&dev_priv->rps.hw_lock);
  4174. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
  4175. mutex_unlock(&dev_priv->rps.hw_lock);
  4176. /* wait for pcode to finish disabling IPS, which may take up to 42ms */
  4177. if (intel_wait_for_register(dev_priv,
  4178. IPS_CTL, IPS_ENABLE, 0,
  4179. 42))
  4180. DRM_ERROR("Timed out waiting for IPS disable\n");
  4181. } else {
  4182. I915_WRITE(IPS_CTL, 0);
  4183. POSTING_READ(IPS_CTL);
  4184. }
  4185. /* We need to wait for a vblank before we can disable the plane. */
  4186. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4187. }
  4188. static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
  4189. {
  4190. if (intel_crtc->overlay) {
  4191. struct drm_device *dev = intel_crtc->base.dev;
  4192. struct drm_i915_private *dev_priv = to_i915(dev);
  4193. mutex_lock(&dev->struct_mutex);
  4194. dev_priv->mm.interruptible = false;
  4195. (void) intel_overlay_switch_off(intel_crtc->overlay);
  4196. dev_priv->mm.interruptible = true;
  4197. mutex_unlock(&dev->struct_mutex);
  4198. }
  4199. /* Let userspace switch the overlay on again. In most cases userspace
  4200. * has to recompute where to put it anyway.
  4201. */
  4202. }
  4203. /**
  4204. * intel_post_enable_primary - Perform operations after enabling primary plane
  4205. * @crtc: the CRTC whose primary plane was just enabled
  4206. *
  4207. * Performs potentially sleeping operations that must be done after the primary
  4208. * plane is enabled, such as updating FBC and IPS. Note that this may be
  4209. * called due to an explicit primary plane update, or due to an implicit
  4210. * re-enable that is caused when a sprite plane is updated to no longer
  4211. * completely hide the primary plane.
  4212. */
  4213. static void
  4214. intel_post_enable_primary(struct drm_crtc *crtc)
  4215. {
  4216. struct drm_device *dev = crtc->dev;
  4217. struct drm_i915_private *dev_priv = to_i915(dev);
  4218. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4219. int pipe = intel_crtc->pipe;
  4220. /*
  4221. * FIXME IPS should be fine as long as one plane is
  4222. * enabled, but in practice it seems to have problems
  4223. * when going from primary only to sprite only and vice
  4224. * versa.
  4225. */
  4226. hsw_enable_ips(intel_crtc);
  4227. /*
  4228. * Gen2 reports pipe underruns whenever all planes are disabled.
  4229. * So don't enable underrun reporting before at least some planes
  4230. * are enabled.
  4231. * FIXME: Need to fix the logic to work when we turn off all planes
  4232. * but leave the pipe running.
  4233. */
  4234. if (IS_GEN2(dev_priv))
  4235. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4236. /* Underruns don't always raise interrupts, so check manually. */
  4237. intel_check_cpu_fifo_underruns(dev_priv);
  4238. intel_check_pch_fifo_underruns(dev_priv);
  4239. }
  4240. /* FIXME move all this to pre_plane_update() with proper state tracking */
  4241. static void
  4242. intel_pre_disable_primary(struct drm_crtc *crtc)
  4243. {
  4244. struct drm_device *dev = crtc->dev;
  4245. struct drm_i915_private *dev_priv = to_i915(dev);
  4246. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4247. int pipe = intel_crtc->pipe;
  4248. /*
  4249. * Gen2 reports pipe underruns whenever all planes are disabled.
4250. * So disable underrun reporting before all the planes get disabled.
  4251. * FIXME: Need to fix the logic to work when we turn off all planes
  4252. * but leave the pipe running.
  4253. */
  4254. if (IS_GEN2(dev_priv))
  4255. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4256. /*
  4257. * FIXME IPS should be fine as long as one plane is
  4258. * enabled, but in practice it seems to have problems
  4259. * when going from primary only to sprite only and vice
  4260. * versa.
  4261. */
  4262. hsw_disable_ips(intel_crtc);
  4263. }
  4264. /* FIXME get rid of this and use pre_plane_update */
  4265. static void
  4266. intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  4267. {
  4268. struct drm_device *dev = crtc->dev;
  4269. struct drm_i915_private *dev_priv = to_i915(dev);
  4270. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4271. int pipe = intel_crtc->pipe;
  4272. intel_pre_disable_primary(crtc);
  4273. /*
  4274. * Vblank time updates from the shadow to live plane control register
  4275. * are blocked if the memory self-refresh mode is active at that
  4276. * moment. So to make sure the plane gets truly disabled, disable
  4277. * first the self-refresh mode. The self-refresh enable bit in turn
  4278. * will be checked/applied by the HW only at the next frame start
  4279. * event which is after the vblank start event, so we need to have a
  4280. * wait-for-vblank between disabling the plane and the pipe.
  4281. */
  4282. if (HAS_GMCH_DISPLAY(dev_priv)) {
  4283. intel_set_memory_cxsr(dev_priv, false);
  4284. dev_priv->wm.vlv.cxsr = false;
  4285. intel_wait_for_vblank(dev_priv, pipe);
  4286. }
  4287. }
  4288. static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
  4289. {
  4290. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  4291. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  4292. struct intel_crtc_state *pipe_config =
  4293. to_intel_crtc_state(crtc->base.state);
  4294. struct drm_plane *primary = crtc->base.primary;
  4295. struct drm_plane_state *old_pri_state =
  4296. drm_atomic_get_existing_plane_state(old_state, primary);
  4297. intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
  4298. crtc->wm.cxsr_allowed = true;
  4299. if (pipe_config->update_wm_post && pipe_config->base.active)
  4300. intel_update_watermarks(crtc);
  4301. if (old_pri_state) {
  4302. struct intel_plane_state *primary_state =
  4303. to_intel_plane_state(primary->state);
  4304. struct intel_plane_state *old_primary_state =
  4305. to_intel_plane_state(old_pri_state);
  4306. intel_fbc_post_update(crtc);
  4307. if (primary_state->base.visible &&
  4308. (needs_modeset(&pipe_config->base) ||
  4309. !old_primary_state->base.visible))
  4310. intel_post_enable_primary(&crtc->base);
  4311. }
  4312. }
  4313. static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
  4314. {
  4315. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  4316. struct drm_device *dev = crtc->base.dev;
  4317. struct drm_i915_private *dev_priv = to_i915(dev);
  4318. struct intel_crtc_state *pipe_config =
  4319. to_intel_crtc_state(crtc->base.state);
  4320. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  4321. struct drm_plane *primary = crtc->base.primary;
  4322. struct drm_plane_state *old_pri_state =
  4323. drm_atomic_get_existing_plane_state(old_state, primary);
  4324. bool modeset = needs_modeset(&pipe_config->base);
  4325. struct intel_atomic_state *old_intel_state =
  4326. to_intel_atomic_state(old_state);
  4327. if (old_pri_state) {
  4328. struct intel_plane_state *primary_state =
  4329. to_intel_plane_state(primary->state);
  4330. struct intel_plane_state *old_primary_state =
  4331. to_intel_plane_state(old_pri_state);
  4332. intel_fbc_pre_update(crtc, pipe_config, primary_state);
  4333. if (old_primary_state->base.visible &&
  4334. (modeset || !primary_state->base.visible))
  4335. intel_pre_disable_primary(&crtc->base);
  4336. }
  4337. if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
  4338. crtc->wm.cxsr_allowed = false;
  4339. /*
  4340. * Vblank time updates from the shadow to live plane control register
  4341. * are blocked if the memory self-refresh mode is active at that
  4342. * moment. So to make sure the plane gets truly disabled, disable
  4343. * first the self-refresh mode. The self-refresh enable bit in turn
  4344. * will be checked/applied by the HW only at the next frame start
  4345. * event which is after the vblank start event, so we need to have a
  4346. * wait-for-vblank between disabling the plane and the pipe.
  4347. */
  4348. if (old_crtc_state->base.active) {
  4349. intel_set_memory_cxsr(dev_priv, false);
  4350. dev_priv->wm.vlv.cxsr = false;
  4351. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4352. }
  4353. }
  4354. /*
  4355. * IVB workaround: must disable low power watermarks for at least
  4356. * one frame before enabling scaling. LP watermarks can be re-enabled
  4357. * when scaling is disabled.
  4358. *
  4359. * WaCxSRDisabledForSpriteScaling:ivb
  4360. */
  4361. if (pipe_config->disable_lp_wm) {
  4362. ilk_disable_lp_wm(dev);
  4363. intel_wait_for_vblank(dev_priv, crtc->pipe);
  4364. }
  4365. /*
  4366. * If we're doing a modeset, we're done. No need to do any pre-vblank
  4367. * watermark programming here.
  4368. */
  4369. if (needs_modeset(&pipe_config->base))
  4370. return;
  4371. /*
  4372. * For platforms that support atomic watermarks, program the
  4373. * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
  4374. * will be the intermediate values that are safe for both pre- and
  4375. * post- vblank; when vblank happens, the 'active' values will be set
  4376. * to the final 'target' values and we'll do this again to get the
  4377. * optimal watermarks. For gen9+ platforms, the values we program here
  4378. * will be the final target values which will get automatically latched
  4379. * at vblank time; no further programming will be necessary.
  4380. *
  4381. * If a platform hasn't been transitioned to atomic watermarks yet,
  4382. * we'll continue to update watermarks the old way, if flags tell
  4383. * us to.
  4384. */
  4385. if (dev_priv->display.initial_watermarks != NULL)
  4386. dev_priv->display.initial_watermarks(old_intel_state,
  4387. pipe_config);
  4388. else if (pipe_config->update_wm_pre)
  4389. intel_update_watermarks(crtc);
  4390. }
  4391. static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
  4392. {
  4393. struct drm_device *dev = crtc->dev;
  4394. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4395. struct drm_plane *p;
  4396. int pipe = intel_crtc->pipe;
  4397. intel_crtc_dpms_overlay_disable(intel_crtc);
  4398. drm_for_each_plane_mask(p, dev, plane_mask)
  4399. to_intel_plane(p)->disable_plane(p, crtc);
  4400. /*
  4401. * FIXME: Once we grow proper nuclear flip support out of this we need
  4402. * to compute the mask of flip planes precisely. For the time being
  4403. * consider this a flip to a NULL plane.
  4404. */
  4405. intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
  4406. }
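/*
 * The intel_encoders_*() helpers below walk the connectors in the atomic
 * state and invoke the corresponding encoder hook (where implemented) for
 * every encoder driving the given crtc.
 */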
  4407. static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
  4408. struct intel_crtc_state *crtc_state,
  4409. struct drm_atomic_state *old_state)
  4410. {
  4411. struct drm_connector_state *old_conn_state;
  4412. struct drm_connector *conn;
  4413. int i;
  4414. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4415. struct drm_connector_state *conn_state = conn->state;
  4416. struct intel_encoder *encoder =
  4417. to_intel_encoder(conn_state->best_encoder);
  4418. if (conn_state->crtc != crtc)
  4419. continue;
  4420. if (encoder->pre_pll_enable)
  4421. encoder->pre_pll_enable(encoder, crtc_state, conn_state);
  4422. }
  4423. }
  4424. static void intel_encoders_pre_enable(struct drm_crtc *crtc,
  4425. struct intel_crtc_state *crtc_state,
  4426. struct drm_atomic_state *old_state)
  4427. {
  4428. struct drm_connector_state *old_conn_state;
  4429. struct drm_connector *conn;
  4430. int i;
  4431. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4432. struct drm_connector_state *conn_state = conn->state;
  4433. struct intel_encoder *encoder =
  4434. to_intel_encoder(conn_state->best_encoder);
  4435. if (conn_state->crtc != crtc)
  4436. continue;
  4437. if (encoder->pre_enable)
  4438. encoder->pre_enable(encoder, crtc_state, conn_state);
  4439. }
  4440. }
  4441. static void intel_encoders_enable(struct drm_crtc *crtc,
  4442. struct intel_crtc_state *crtc_state,
  4443. struct drm_atomic_state *old_state)
  4444. {
  4445. struct drm_connector_state *old_conn_state;
  4446. struct drm_connector *conn;
  4447. int i;
  4448. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4449. struct drm_connector_state *conn_state = conn->state;
  4450. struct intel_encoder *encoder =
  4451. to_intel_encoder(conn_state->best_encoder);
  4452. if (conn_state->crtc != crtc)
  4453. continue;
  4454. encoder->enable(encoder, crtc_state, conn_state);
  4455. intel_opregion_notify_encoder(encoder, true);
  4456. }
  4457. }
  4458. static void intel_encoders_disable(struct drm_crtc *crtc,
  4459. struct intel_crtc_state *old_crtc_state,
  4460. struct drm_atomic_state *old_state)
  4461. {
  4462. struct drm_connector_state *old_conn_state;
  4463. struct drm_connector *conn;
  4464. int i;
  4465. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4466. struct intel_encoder *encoder =
  4467. to_intel_encoder(old_conn_state->best_encoder);
  4468. if (old_conn_state->crtc != crtc)
  4469. continue;
  4470. intel_opregion_notify_encoder(encoder, false);
  4471. encoder->disable(encoder, old_crtc_state, old_conn_state);
  4472. }
  4473. }
  4474. static void intel_encoders_post_disable(struct drm_crtc *crtc,
  4475. struct intel_crtc_state *old_crtc_state,
  4476. struct drm_atomic_state *old_state)
  4477. {
  4478. struct drm_connector_state *old_conn_state;
  4479. struct drm_connector *conn;
  4480. int i;
  4481. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4482. struct intel_encoder *encoder =
  4483. to_intel_encoder(old_conn_state->best_encoder);
  4484. if (old_conn_state->crtc != crtc)
  4485. continue;
  4486. if (encoder->post_disable)
  4487. encoder->post_disable(encoder, old_crtc_state, old_conn_state);
  4488. }
  4489. }
  4490. static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
  4491. struct intel_crtc_state *old_crtc_state,
  4492. struct drm_atomic_state *old_state)
  4493. {
  4494. struct drm_connector_state *old_conn_state;
  4495. struct drm_connector *conn;
  4496. int i;
  4497. for_each_connector_in_state(old_state, conn, old_conn_state, i) {
  4498. struct intel_encoder *encoder =
  4499. to_intel_encoder(old_conn_state->best_encoder);
  4500. if (old_conn_state->crtc != crtc)
  4501. continue;
  4502. if (encoder->post_pll_disable)
  4503. encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
  4504. }
  4505. }
  4506. static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
  4507. struct drm_atomic_state *old_state)
  4508. {
  4509. struct drm_crtc *crtc = pipe_config->base.crtc;
  4510. struct drm_device *dev = crtc->dev;
  4511. struct drm_i915_private *dev_priv = to_i915(dev);
  4512. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4513. int pipe = intel_crtc->pipe;
  4514. struct intel_atomic_state *old_intel_state =
  4515. to_intel_atomic_state(old_state);
  4516. if (WARN_ON(intel_crtc->active))
  4517. return;
  4518. /*
  4519. * Sometimes spurious CPU pipe underruns happen during FDI
  4520. * training, at least with VGA+HDMI cloning. Suppress them.
  4521. *
4522. * On ILK we get occasional spurious CPU pipe underruns
  4523. * between eDP port A enable and vdd enable. Also PCH port
  4524. * enable seems to result in the occasional CPU pipe underrun.
  4525. *
  4526. * Spurious PCH underruns also occur during PCH enabling.
  4527. */
  4528. if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
  4529. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4530. if (intel_crtc->config->has_pch_encoder)
  4531. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4532. if (intel_crtc->config->has_pch_encoder)
  4533. intel_prepare_shared_dpll(intel_crtc);
  4534. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  4535. intel_dp_set_m_n(intel_crtc, M1_N1);
  4536. intel_set_pipe_timings(intel_crtc);
  4537. intel_set_pipe_src_size(intel_crtc);
  4538. if (intel_crtc->config->has_pch_encoder) {
  4539. intel_cpu_transcoder_set_m_n(intel_crtc,
  4540. &intel_crtc->config->fdi_m_n, NULL);
  4541. }
  4542. ironlake_set_pipeconf(crtc);
  4543. intel_crtc->active = true;
  4544. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  4545. if (intel_crtc->config->has_pch_encoder) {
  4546. /* Note: FDI PLL enabling _must_ be done before we enable the
  4547. * cpu pipes, hence this is separate from all the other fdi/pch
  4548. * enabling. */
  4549. ironlake_fdi_pll_enable(intel_crtc);
  4550. } else {
  4551. assert_fdi_tx_disabled(dev_priv, pipe);
  4552. assert_fdi_rx_disabled(dev_priv, pipe);
  4553. }
  4554. ironlake_pfit_enable(intel_crtc);
  4555. /*
  4556. * On ILK+ LUT must be loaded before the pipe is running but with
  4557. * clocks enabled
  4558. */
  4559. intel_color_load_luts(&pipe_config->base);
  4560. if (dev_priv->display.initial_watermarks != NULL)
  4561. dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
  4562. intel_enable_pipe(intel_crtc);
  4563. if (intel_crtc->config->has_pch_encoder)
  4564. ironlake_pch_enable(crtc);
  4565. assert_vblank_disabled(crtc);
  4566. drm_crtc_vblank_on(crtc);
  4567. intel_encoders_enable(crtc, pipe_config, old_state);
  4568. if (HAS_PCH_CPT(dev_priv))
  4569. cpt_verify_modeset(dev, intel_crtc->pipe);
  4570. /* Must wait for vblank to avoid spurious PCH FIFO underruns */
  4571. if (intel_crtc->config->has_pch_encoder)
  4572. intel_wait_for_vblank(dev_priv, pipe);
  4573. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4574. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4575. }
  4576. /* IPS only exists on ULT machines and is tied to pipe A. */
  4577. static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
  4578. {
  4579. return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
  4580. }
  4581. static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
  4582. struct drm_atomic_state *old_state)
  4583. {
  4584. struct drm_crtc *crtc = pipe_config->base.crtc;
  4585. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4586. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4587. int pipe = intel_crtc->pipe, hsw_workaround_pipe;
  4588. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4589. struct intel_atomic_state *old_intel_state =
  4590. to_intel_atomic_state(old_state);
  4591. if (WARN_ON(intel_crtc->active))
  4592. return;
  4593. if (intel_crtc->config->has_pch_encoder)
  4594. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4595. false);
  4596. intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  4597. if (intel_crtc->config->shared_dpll)
  4598. intel_enable_shared_dpll(intel_crtc);
  4599. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  4600. intel_dp_set_m_n(intel_crtc, M1_N1);
  4601. if (!transcoder_is_dsi(cpu_transcoder))
  4602. intel_set_pipe_timings(intel_crtc);
  4603. intel_set_pipe_src_size(intel_crtc);
  4604. if (cpu_transcoder != TRANSCODER_EDP &&
  4605. !transcoder_is_dsi(cpu_transcoder)) {
  4606. I915_WRITE(PIPE_MULT(cpu_transcoder),
  4607. intel_crtc->config->pixel_multiplier - 1);
  4608. }
  4609. if (intel_crtc->config->has_pch_encoder) {
  4610. intel_cpu_transcoder_set_m_n(intel_crtc,
  4611. &intel_crtc->config->fdi_m_n, NULL);
  4612. }
  4613. if (!transcoder_is_dsi(cpu_transcoder))
  4614. haswell_set_pipeconf(crtc);
  4615. haswell_set_pipemisc(crtc);
  4616. intel_color_set_csc(&pipe_config->base);
  4617. intel_crtc->active = true;
  4618. if (intel_crtc->config->has_pch_encoder)
  4619. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4620. else
  4621. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4622. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  4623. if (intel_crtc->config->has_pch_encoder)
  4624. dev_priv->display.fdi_link_train(crtc);
  4625. if (!transcoder_is_dsi(cpu_transcoder))
  4626. intel_ddi_enable_pipe_clock(intel_crtc);
  4627. if (INTEL_GEN(dev_priv) >= 9)
  4628. skylake_pfit_enable(intel_crtc);
  4629. else
  4630. ironlake_pfit_enable(intel_crtc);
  4631. /*
  4632. * On ILK+ LUT must be loaded before the pipe is running but with
  4633. * clocks enabled
  4634. */
  4635. intel_color_load_luts(&pipe_config->base);
  4636. intel_ddi_set_pipe_settings(crtc);
  4637. if (!transcoder_is_dsi(cpu_transcoder))
  4638. intel_ddi_enable_transcoder_func(crtc);
  4639. if (dev_priv->display.initial_watermarks != NULL)
  4640. dev_priv->display.initial_watermarks(old_intel_state,
  4641. pipe_config);
  4642. else
  4643. intel_update_watermarks(intel_crtc);
  4644. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4645. if (!transcoder_is_dsi(cpu_transcoder))
  4646. intel_enable_pipe(intel_crtc);
  4647. if (intel_crtc->config->has_pch_encoder)
  4648. lpt_pch_enable(crtc);
  4649. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
  4650. intel_ddi_set_vc_payload_alloc(crtc, true);
  4651. assert_vblank_disabled(crtc);
  4652. drm_crtc_vblank_on(crtc);
  4653. intel_encoders_enable(crtc, pipe_config, old_state);
  4654. if (intel_crtc->config->has_pch_encoder) {
  4655. intel_wait_for_vblank(dev_priv, pipe);
  4656. intel_wait_for_vblank(dev_priv, pipe);
  4657. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4658. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4659. true);
  4660. }
  4661. /* If we change the relative order between pipe/planes enabling, we need
  4662. * to change the workaround. */
  4663. hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
  4664. if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
  4665. intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
  4666. intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
  4667. }
  4668. }
  4669. static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
  4670. {
  4671. struct drm_device *dev = crtc->base.dev;
  4672. struct drm_i915_private *dev_priv = to_i915(dev);
  4673. int pipe = crtc->pipe;
4674. /* To avoid upsetting the power well on Haswell, only disable the pfit if
  4675. * it's in use. The hw state code will make sure we get this right. */
  4676. if (force || crtc->config->pch_pfit.enabled) {
  4677. I915_WRITE(PF_CTL(pipe), 0);
  4678. I915_WRITE(PF_WIN_POS(pipe), 0);
  4679. I915_WRITE(PF_WIN_SZ(pipe), 0);
  4680. }
  4681. }
  4682. static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
  4683. struct drm_atomic_state *old_state)
  4684. {
  4685. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  4686. struct drm_device *dev = crtc->dev;
  4687. struct drm_i915_private *dev_priv = to_i915(dev);
  4688. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4689. int pipe = intel_crtc->pipe;
  4690. /*
  4691. * Sometimes spurious CPU pipe underruns happen when the
  4692. * pipe is already disabled, but FDI RX/TX is still enabled.
  4693. * Happens at least with VGA+HDMI cloning. Suppress them.
  4694. */
  4695. if (intel_crtc->config->has_pch_encoder) {
  4696. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4697. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4698. }
  4699. intel_encoders_disable(crtc, old_crtc_state, old_state);
  4700. drm_crtc_vblank_off(crtc);
  4701. assert_vblank_disabled(crtc);
  4702. intel_disable_pipe(intel_crtc);
  4703. ironlake_pfit_disable(intel_crtc, false);
  4704. if (intel_crtc->config->has_pch_encoder)
  4705. ironlake_fdi_disable(crtc);
  4706. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  4707. if (intel_crtc->config->has_pch_encoder) {
  4708. ironlake_disable_pch_transcoder(dev_priv, pipe);
  4709. if (HAS_PCH_CPT(dev_priv)) {
  4710. i915_reg_t reg;
  4711. u32 temp;
  4712. /* disable TRANS_DP_CTL */
  4713. reg = TRANS_DP_CTL(pipe);
  4714. temp = I915_READ(reg);
  4715. temp &= ~(TRANS_DP_OUTPUT_ENABLE |
  4716. TRANS_DP_PORT_SEL_MASK);
  4717. temp |= TRANS_DP_PORT_SEL_NONE;
  4718. I915_WRITE(reg, temp);
  4719. /* disable DPLL_SEL */
  4720. temp = I915_READ(PCH_DPLL_SEL);
  4721. temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
  4722. I915_WRITE(PCH_DPLL_SEL, temp);
  4723. }
  4724. ironlake_fdi_pll_disable(intel_crtc);
  4725. }
  4726. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4727. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4728. }
  4729. static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
  4730. struct drm_atomic_state *old_state)
  4731. {
  4732. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  4733. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4734. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4735. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4736. if (intel_crtc->config->has_pch_encoder)
  4737. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4738. false);
  4739. intel_encoders_disable(crtc, old_crtc_state, old_state);
  4740. drm_crtc_vblank_off(crtc);
  4741. assert_vblank_disabled(crtc);
  4742. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4743. if (!transcoder_is_dsi(cpu_transcoder))
  4744. intel_disable_pipe(intel_crtc);
  4745. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
  4746. intel_ddi_set_vc_payload_alloc(crtc, false);
  4747. if (!transcoder_is_dsi(cpu_transcoder))
  4748. intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
  4749. if (INTEL_GEN(dev_priv) >= 9)
  4750. skylake_scaler_disable(intel_crtc);
  4751. else
  4752. ironlake_pfit_disable(intel_crtc, false);
  4753. if (!transcoder_is_dsi(cpu_transcoder))
  4754. intel_ddi_disable_pipe_clock(intel_crtc);
  4755. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  4756. if (old_crtc_state->has_pch_encoder)
  4757. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4758. true);
  4759. }
  4760. static void i9xx_pfit_enable(struct intel_crtc *crtc)
  4761. {
  4762. struct drm_device *dev = crtc->base.dev;
  4763. struct drm_i915_private *dev_priv = to_i915(dev);
  4764. struct intel_crtc_state *pipe_config = crtc->config;
  4765. if (!pipe_config->gmch_pfit.control)
  4766. return;
  4767. /*
  4768. * The panel fitter should only be adjusted whilst the pipe is disabled,
  4769. * according to register description and PRM.
  4770. */
  4771. WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
  4772. assert_pipe_disabled(dev_priv, crtc->pipe);
  4773. I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
  4774. I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
  4775. /* Border color in case we don't scale up to the full screen. Black by
  4776. * default, change to something else for debugging. */
  4777. I915_WRITE(BCLRPAT(crtc->pipe), 0);
  4778. }
  4779. static enum intel_display_power_domain port_to_power_domain(enum port port)
  4780. {
  4781. switch (port) {
  4782. case PORT_A:
  4783. return POWER_DOMAIN_PORT_DDI_A_LANES;
  4784. case PORT_B:
  4785. return POWER_DOMAIN_PORT_DDI_B_LANES;
  4786. case PORT_C:
  4787. return POWER_DOMAIN_PORT_DDI_C_LANES;
  4788. case PORT_D:
  4789. return POWER_DOMAIN_PORT_DDI_D_LANES;
  4790. case PORT_E:
  4791. return POWER_DOMAIN_PORT_DDI_E_LANES;
  4792. default:
  4793. MISSING_CASE(port);
  4794. return POWER_DOMAIN_PORT_OTHER;
  4795. }
  4796. }
  4797. static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
  4798. {
  4799. switch (port) {
  4800. case PORT_A:
  4801. return POWER_DOMAIN_AUX_A;
  4802. case PORT_B:
  4803. return POWER_DOMAIN_AUX_B;
  4804. case PORT_C:
  4805. return POWER_DOMAIN_AUX_C;
  4806. case PORT_D:
  4807. return POWER_DOMAIN_AUX_D;
  4808. case PORT_E:
  4809. /* FIXME: Check VBT for actual wiring of PORT E */
  4810. return POWER_DOMAIN_AUX_D;
  4811. default:
  4812. MISSING_CASE(port);
  4813. return POWER_DOMAIN_AUX_A;
  4814. }
  4815. }
  4816. enum intel_display_power_domain
  4817. intel_display_port_power_domain(struct intel_encoder *intel_encoder)
  4818. {
  4819. struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
  4820. struct intel_digital_port *intel_dig_port;
  4821. switch (intel_encoder->type) {
  4822. case INTEL_OUTPUT_UNKNOWN:
  4823. /* Only DDI platforms should ever use this output type */
  4824. WARN_ON_ONCE(!HAS_DDI(dev_priv));
  4825. case INTEL_OUTPUT_DP:
  4826. case INTEL_OUTPUT_HDMI:
  4827. case INTEL_OUTPUT_EDP:
  4828. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4829. return port_to_power_domain(intel_dig_port->port);
  4830. case INTEL_OUTPUT_DP_MST:
  4831. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4832. return port_to_power_domain(intel_dig_port->port);
  4833. case INTEL_OUTPUT_ANALOG:
  4834. return POWER_DOMAIN_PORT_CRT;
  4835. case INTEL_OUTPUT_DSI:
  4836. return POWER_DOMAIN_PORT_DSI;
  4837. default:
  4838. return POWER_DOMAIN_PORT_OTHER;
  4839. }
  4840. }
  4841. enum intel_display_power_domain
  4842. intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
  4843. {
  4844. struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
  4845. struct intel_digital_port *intel_dig_port;
  4846. switch (intel_encoder->type) {
  4847. case INTEL_OUTPUT_UNKNOWN:
  4848. case INTEL_OUTPUT_HDMI:
  4849. /*
  4850. * Only DDI platforms should ever use these output types.
  4851. * We can get here after the HDMI detect code has already set
  4852. * the type of the shared encoder. Since we can't be sure
  4853. * what's the status of the given connectors, play safe and
  4854. * run the DP detection too.
  4855. */
  4856. WARN_ON_ONCE(!HAS_DDI(dev_priv));
  4857. case INTEL_OUTPUT_DP:
  4858. case INTEL_OUTPUT_EDP:
  4859. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4860. return port_to_aux_power_domain(intel_dig_port->port);
  4861. case INTEL_OUTPUT_DP_MST:
  4862. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4863. return port_to_aux_power_domain(intel_dig_port->port);
  4864. default:
  4865. MISSING_CASE(intel_encoder->type);
  4866. return POWER_DOMAIN_AUX_A;
  4867. }
  4868. }
  4869. static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
  4870. struct intel_crtc_state *crtc_state)
  4871. {
  4872. struct drm_device *dev = crtc->dev;
  4873. struct drm_encoder *encoder;
  4874. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4875. enum pipe pipe = intel_crtc->pipe;
  4876. unsigned long mask;
  4877. enum transcoder transcoder = crtc_state->cpu_transcoder;
  4878. if (!crtc_state->base.active)
  4879. return 0;
  4880. mask = BIT(POWER_DOMAIN_PIPE(pipe));
  4881. mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
  4882. if (crtc_state->pch_pfit.enabled ||
  4883. crtc_state->pch_pfit.force_thru)
  4884. mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
  4885. drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
  4886. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  4887. mask |= BIT(intel_display_port_power_domain(intel_encoder));
  4888. }
  4889. if (crtc_state->shared_dpll)
  4890. mask |= BIT(POWER_DOMAIN_PLLS);
  4891. return mask;
  4892. }
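/*
 * Grab references for the power domains the new crtc state needs but the old
 * one didn't hold, and return the set that is no longer needed so the caller
 * can release it with modeset_put_power_domains() once the modeset is done.
 */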
  4893. static unsigned long
  4894. modeset_get_crtc_power_domains(struct drm_crtc *crtc,
  4895. struct intel_crtc_state *crtc_state)
  4896. {
  4897. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  4898. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4899. enum intel_display_power_domain domain;
  4900. unsigned long domains, new_domains, old_domains;
  4901. old_domains = intel_crtc->enabled_power_domains;
  4902. intel_crtc->enabled_power_domains = new_domains =
  4903. get_crtc_power_domains(crtc, crtc_state);
  4904. domains = new_domains & ~old_domains;
  4905. for_each_power_domain(domain, domains)
  4906. intel_display_power_get(dev_priv, domain);
  4907. return old_domains & ~new_domains;
  4908. }
  4909. static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
  4910. unsigned long domains)
  4911. {
  4912. enum intel_display_power_domain domain;
  4913. for_each_power_domain(domain, domains)
  4914. intel_display_power_put(dev_priv, domain);
  4915. }
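/*
 * The maximum dot clock is derived from cdclk: recent platforms can run the
 * pipe at the full cdclk rate, older ones need a small guardband, and gen2/3
 * parts can double the limit by using double-wide pipe mode.
 */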
  4916. static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  4917. {
  4918. int max_cdclk_freq = dev_priv->max_cdclk_freq;
  4919. if (INTEL_INFO(dev_priv)->gen >= 9 ||
  4920. IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  4921. return max_cdclk_freq;
  4922. else if (IS_CHERRYVIEW(dev_priv))
  4923. return max_cdclk_freq*95/100;
  4924. else if (INTEL_INFO(dev_priv)->gen < 4)
  4925. return 2*max_cdclk_freq*90/100;
  4926. else
  4927. return max_cdclk_freq*90/100;
  4928. }
  4929. static int skl_calc_cdclk(int max_pixclk, int vco);
  4930. static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
  4931. {
  4932. if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
  4933. u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
  4934. int max_cdclk, vco;
  4935. vco = dev_priv->skl_preferred_vco_freq;
  4936. WARN_ON(vco != 8100000 && vco != 8640000);
  4937. /*
  4938. * Use the lower (vco 8640) cdclk values as a
  4939. * first guess. skl_calc_cdclk() will correct it
  4940. * if the preferred vco is 8100 instead.
  4941. */
  4942. if (limit == SKL_DFSM_CDCLK_LIMIT_675)
  4943. max_cdclk = 617143;
  4944. else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
  4945. max_cdclk = 540000;
  4946. else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
  4947. max_cdclk = 432000;
  4948. else
  4949. max_cdclk = 308571;
  4950. dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
  4951. } else if (IS_BROXTON(dev_priv)) {
  4952. dev_priv->max_cdclk_freq = 624000;
  4953. } else if (IS_BROADWELL(dev_priv)) {
  4954. /*
  4955. * FIXME with extra cooling we can allow
4956. * 540 MHz for ULX and 675 MHz for ULT.
4957. * How can we know if extra cooling is
4958. * available? PCI ID, VBT, something else?
  4959. */
  4960. if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  4961. dev_priv->max_cdclk_freq = 450000;
  4962. else if (IS_BDW_ULX(dev_priv))
  4963. dev_priv->max_cdclk_freq = 450000;
  4964. else if (IS_BDW_ULT(dev_priv))
  4965. dev_priv->max_cdclk_freq = 540000;
  4966. else
  4967. dev_priv->max_cdclk_freq = 675000;
  4968. } else if (IS_CHERRYVIEW(dev_priv)) {
  4969. dev_priv->max_cdclk_freq = 320000;
  4970. } else if (IS_VALLEYVIEW(dev_priv)) {
  4971. dev_priv->max_cdclk_freq = 400000;
  4972. } else {
  4973. /* otherwise assume cdclk is fixed */
  4974. dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
  4975. }
  4976. dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
  4977. DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
  4978. dev_priv->max_cdclk_freq);
  4979. DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
  4980. dev_priv->max_dotclk_freq);
  4981. }
  4982. static void intel_update_cdclk(struct drm_i915_private *dev_priv)
  4983. {
  4984. dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
  4985. if (INTEL_GEN(dev_priv) >= 9)
  4986. DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
  4987. dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
  4988. dev_priv->cdclk_pll.ref);
  4989. else
  4990. DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
  4991. dev_priv->cdclk_freq);
  4992. /*
  4993. * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
  4994. * Programmng [sic] note: bit[9:2] should be programmed to the number
  4995. * of cdclk that generates 4MHz reference clock freq which is used to
  4996. * generate GMBus clock. This will vary with the cdclk freq.
  4997. */
  4998. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  4999. I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
  5000. }
  5001. /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
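/* e.g. cdclk = 450000 kHz -> DIV_ROUND_CLOSEST(449000, 500) = 898 (0x382) */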
  5002. static int skl_cdclk_decimal(int cdclk)
  5003. {
  5004. return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
  5005. }
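/*
 * BXT derives cdclk from the DE PLL: vco = 19.2 MHz ref * ratio, and
 * cdclk = vco / 2 / {1, 1.5, 2, 4}. A ratio of 60 gives a 1152 MHz vco
 * (cdclk 144/288/384/576 MHz); a ratio of 65 gives 1248 MHz (624 MHz).
 */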
  5006. static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
  5007. {
  5008. int ratio;
  5009. if (cdclk == dev_priv->cdclk_pll.ref)
  5010. return 0;
  5011. switch (cdclk) {
  5012. default:
  5013. MISSING_CASE(cdclk);
  5014. case 144000:
  5015. case 288000:
  5016. case 384000:
  5017. case 576000:
  5018. ratio = 60;
  5019. break;
  5020. case 624000:
  5021. ratio = 65;
  5022. break;
  5023. }
  5024. return dev_priv->cdclk_pll.ref * ratio;
  5025. }
  5026. static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
  5027. {
  5028. I915_WRITE(BXT_DE_PLL_ENABLE, 0);
  5029. /* Timeout 200us */
  5030. if (intel_wait_for_register(dev_priv,
  5031. BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
  5032. 1))
  5033. DRM_ERROR("timeout waiting for DE PLL unlock\n");
  5034. dev_priv->cdclk_pll.vco = 0;
  5035. }
  5036. static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
  5037. {
  5038. int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
  5039. u32 val;
  5040. val = I915_READ(BXT_DE_PLL_CTL);
  5041. val &= ~BXT_DE_PLL_RATIO_MASK;
  5042. val |= BXT_DE_PLL_RATIO(ratio);
  5043. I915_WRITE(BXT_DE_PLL_CTL, val);
  5044. I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
  5045. /* Timeout 200us */
  5046. if (intel_wait_for_register(dev_priv,
  5047. BXT_DE_PLL_ENABLE,
  5048. BXT_DE_PLL_LOCK,
  5049. BXT_DE_PLL_LOCK,
  5050. 1))
  5051. DRM_ERROR("timeout waiting for DE PLL lock\n");
  5052. dev_priv->cdclk_pll.vco = vco;
  5053. }
  5054. static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
  5055. {
  5056. u32 val, divider;
  5057. int vco, ret;
  5058. vco = bxt_de_pll_vco(dev_priv, cdclk);
  5059. DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
  5060. /* cdclk = vco / 2 / div{1,1.5,2,4} */
  5061. switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
  5062. case 8:
  5063. divider = BXT_CDCLK_CD2X_DIV_SEL_4;
  5064. break;
  5065. case 4:
  5066. divider = BXT_CDCLK_CD2X_DIV_SEL_2;
  5067. break;
  5068. case 3:
  5069. divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
  5070. break;
  5071. case 2:
  5072. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  5073. break;
  5074. default:
  5075. WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
  5076. WARN_ON(vco != 0);
  5077. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  5078. break;
  5079. }
  5080. /* Inform power controller of upcoming frequency change */
  5081. mutex_lock(&dev_priv->rps.hw_lock);
  5082. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  5083. 0x80000000);
  5084. mutex_unlock(&dev_priv->rps.hw_lock);
  5085. if (ret) {
  5086. DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
  5087. ret, cdclk);
  5088. return;
  5089. }
  5090. if (dev_priv->cdclk_pll.vco != 0 &&
  5091. dev_priv->cdclk_pll.vco != vco)
  5092. bxt_de_pll_disable(dev_priv);
  5093. if (dev_priv->cdclk_pll.vco != vco)
  5094. bxt_de_pll_enable(dev_priv, vco);
  5095. val = divider | skl_cdclk_decimal(cdclk);
  5096. /*
  5097. * FIXME if only the cd2x divider needs changing, it could be done
  5098. * without shutting off the pipe (if only one pipe is active).
  5099. */
  5100. val |= BXT_CDCLK_CD2X_PIPE_NONE;
  5101. /*
  5102. * Disable SSA Precharge when CD clock frequency < 500 MHz,
  5103. * enable otherwise.
  5104. */
  5105. if (cdclk >= 500000)
  5106. val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  5107. I915_WRITE(CDCLK_CTL, val);
  5108. mutex_lock(&dev_priv->rps.hw_lock);
  5109. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  5110. DIV_ROUND_UP(cdclk, 25000));
  5111. mutex_unlock(&dev_priv->rps.hw_lock);
  5112. if (ret) {
  5113. DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
  5114. ret, cdclk);
  5115. return;
  5116. }
  5117. intel_update_cdclk(dev_priv);
  5118. }
  5119. static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
  5120. {
  5121. u32 cdctl, expected;
  5122. intel_update_cdclk(dev_priv);
  5123. if (dev_priv->cdclk_pll.vco == 0 ||
  5124. dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
  5125. goto sanitize;
  5126. /* DPLL okay; verify the cdclock
  5127. *
  5128. * Some BIOS versions leave an incorrect decimal frequency value and
5129. * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
  5130. * so sanitize this register.
  5131. */
  5132. cdctl = I915_READ(CDCLK_CTL);
  5133. /*
  5134. * Let's ignore the pipe field, since BIOS could have configured the
  5135. * dividers both synching to an active pipe, or asynchronously
  5136. * (PIPE_NONE).
  5137. */
  5138. cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
  5139. expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
  5140. skl_cdclk_decimal(dev_priv->cdclk_freq);
  5141. /*
  5142. * Disable SSA Precharge when CD clock frequency < 500 MHz,
  5143. * enable otherwise.
  5144. */
  5145. if (dev_priv->cdclk_freq >= 500000)
  5146. expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  5147. if (cdctl == expected)
  5148. /* All well; nothing to sanitize */
  5149. return;
  5150. sanitize:
  5151. DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
  5152. /* force cdclk programming */
  5153. dev_priv->cdclk_freq = 0;
  5154. /* force full PLL disable + enable */
  5155. dev_priv->cdclk_pll.vco = -1;
  5156. }
  5157. void bxt_init_cdclk(struct drm_i915_private *dev_priv)
  5158. {
  5159. bxt_sanitize_cdclk(dev_priv);
  5160. if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
  5161. return;
  5162. /*
  5163. * FIXME:
  5164. * - The initial CDCLK needs to be read from VBT.
  5165. * Need to make this change after VBT has changes for BXT.
  5166. */
  5167. bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
  5168. }
  5169. void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
  5170. {
  5171. bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
  5172. }
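/*
 * Pick the smallest cdclk that can carry max_pixclk, constrained to the
 * frequencies reachable from the given DPLL0 vco (8640 MHz yields
 * 308.571/432/540/617.143 MHz, 8100 MHz yields 337.5/450/540/675 MHz).
 */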
  5173. static int skl_calc_cdclk(int max_pixclk, int vco)
  5174. {
  5175. if (vco == 8640000) {
  5176. if (max_pixclk > 540000)
  5177. return 617143;
  5178. else if (max_pixclk > 432000)
  5179. return 540000;
  5180. else if (max_pixclk > 308571)
  5181. return 432000;
  5182. else
  5183. return 308571;
  5184. } else {
  5185. if (max_pixclk > 540000)
  5186. return 675000;
  5187. else if (max_pixclk > 450000)
  5188. return 540000;
  5189. else if (max_pixclk > 337500)
  5190. return 450000;
  5191. else
  5192. return 337500;
  5193. }
  5194. }
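/*
 * On SKL cdclk is sourced from DPLL0; the link rate programmed for DPLL0 in
 * DPLL_CTRL1 implies its vco: 810/1350/1620/2700 map to 8100 MHz, while
 * 1080/2160 map to 8640 MHz.
 */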
  5195. static void
  5196. skl_dpll0_update(struct drm_i915_private *dev_priv)
  5197. {
  5198. u32 val;
  5199. dev_priv->cdclk_pll.ref = 24000;
  5200. dev_priv->cdclk_pll.vco = 0;
  5201. val = I915_READ(LCPLL1_CTL);
  5202. if ((val & LCPLL_PLL_ENABLE) == 0)
  5203. return;
  5204. if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
  5205. return;
  5206. val = I915_READ(DPLL_CTRL1);
  5207. if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
  5208. DPLL_CTRL1_SSC(SKL_DPLL0) |
  5209. DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
  5210. DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
  5211. return;
  5212. switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
  5213. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
  5214. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
  5215. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
  5216. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
  5217. dev_priv->cdclk_pll.vco = 8100000;
  5218. break;
  5219. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
  5220. case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
  5221. dev_priv->cdclk_pll.vco = 8640000;
  5222. break;
  5223. default:
  5224. MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
  5225. break;
  5226. }
  5227. }
  5228. void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
  5229. {
  5230. bool changed = dev_priv->skl_preferred_vco_freq != vco;
  5231. dev_priv->skl_preferred_vco_freq = vco;
  5232. if (changed)
  5233. intel_update_max_cdclk(dev_priv);
  5234. }
  5235. static void
  5236. skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
  5237. {
  5238. int min_cdclk = skl_calc_cdclk(0, vco);
  5239. u32 val;
  5240. WARN_ON(vco != 8100000 && vco != 8640000);
  5241. /* select the minimum CDCLK before enabling DPLL 0 */
  5242. val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
  5243. I915_WRITE(CDCLK_CTL, val);
  5244. POSTING_READ(CDCLK_CTL);
  5245. /*
  5246. * We always enable DPLL0 with the lowest link rate possible, but still
  5247. * taking into account the VCO required to operate the eDP panel at the
  5248. * desired frequency. The usual DP link rates operate with a VCO of
  5249. * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
  5250. * The modeset code is responsible for the selection of the exact link
  5251. * rate later on, with the constraint of choosing a frequency that
  5252. * works with vco.
  5253. */
  5254. val = I915_READ(DPLL_CTRL1);
  5255. val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
  5256. DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
  5257. val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  5258. if (vco == 8640000)
  5259. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
  5260. SKL_DPLL0);
  5261. else
  5262. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
  5263. SKL_DPLL0);
  5264. I915_WRITE(DPLL_CTRL1, val);
  5265. POSTING_READ(DPLL_CTRL1);
  5266. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
  5267. if (intel_wait_for_register(dev_priv,
  5268. LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
  5269. 5))
  5270. DRM_ERROR("DPLL0 not locked\n");
  5271. dev_priv->cdclk_pll.vco = vco;
  5272. /* We'll want to keep using the current vco from now on. */
  5273. skl_set_preferred_cdclk_vco(dev_priv, vco);
  5274. }
  5275. static void
  5276. skl_dpll0_disable(struct drm_i915_private *dev_priv)
  5277. {
  5278. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
  5279. if (intel_wait_for_register(dev_priv,
  5280. LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
  5281. 1))
  5282. DRM_ERROR("Couldn't disable DPLL0\n");
  5283. dev_priv->cdclk_pll.vco = 0;
  5284. }
  5285. static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
  5286. {
  5287. int ret;
  5288. u32 val;
  5289. /* inform PCU we want to change CDCLK */
  5290. val = SKL_CDCLK_PREPARE_FOR_CHANGE;
  5291. mutex_lock(&dev_priv->rps.hw_lock);
  5292. ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
  5293. mutex_unlock(&dev_priv->rps.hw_lock);
  5294. return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
  5295. }
  5296. static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
  5297. {
  5298. return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
  5299. }
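/*
 * The cdclk change is a three step handshake with the PCU (see
 * skl_set_cdclk() below): ask it to prepare via SKL_CDCLK_PREPARE_FOR_CHANGE,
 * poll until it reports SKL_CDCLK_READY_FOR_CHANGE (or the wait above times
 * out), and after reprogramming CDCLK_CTL write the matching pcu_ack value
 * back.
 */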
  5300. static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
  5301. {
  5302. u32 freq_select, pcu_ack;
  5303. WARN_ON((cdclk == 24000) != (vco == 0));
  5304. DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
  5305. if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
  5306. DRM_ERROR("failed to inform PCU about cdclk change\n");
  5307. return;
  5308. }
  5309. /* set CDCLK_CTL */
  5310. switch (cdclk) {
  5311. case 450000:
  5312. case 432000:
  5313. freq_select = CDCLK_FREQ_450_432;
  5314. pcu_ack = 1;
  5315. break;
  5316. case 540000:
  5317. freq_select = CDCLK_FREQ_540;
  5318. pcu_ack = 2;
  5319. break;
  5320. case 308571:
  5321. case 337500:
  5322. default:
  5323. freq_select = CDCLK_FREQ_337_308;
  5324. pcu_ack = 0;
  5325. break;
  5326. case 617143:
  5327. case 675000:
  5328. freq_select = CDCLK_FREQ_675_617;
  5329. pcu_ack = 3;
  5330. break;
  5331. }
  5332. if (dev_priv->cdclk_pll.vco != 0 &&
  5333. dev_priv->cdclk_pll.vco != vco)
  5334. skl_dpll0_disable(dev_priv);
  5335. if (dev_priv->cdclk_pll.vco != vco)
  5336. skl_dpll0_enable(dev_priv, vco);
  5337. I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
  5338. POSTING_READ(CDCLK_CTL);
  5339. /* inform PCU of the change */
  5340. mutex_lock(&dev_priv->rps.hw_lock);
  5341. sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
  5342. mutex_unlock(&dev_priv->rps.hw_lock);
  5343. intel_update_cdclk(dev_priv);
  5344. }
  5345. static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
  5346. void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
  5347. {
  5348. skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
  5349. }
  5350. void skl_init_cdclk(struct drm_i915_private *dev_priv)
  5351. {
  5352. int cdclk, vco;
  5353. skl_sanitize_cdclk(dev_priv);
  5354. if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
  5355. /*
  5356. * Use the current vco as our initial
  5357. * guess as to what the preferred vco is.
  5358. */
  5359. if (dev_priv->skl_preferred_vco_freq == 0)
  5360. skl_set_preferred_cdclk_vco(dev_priv,
  5361. dev_priv->cdclk_pll.vco);
  5362. return;
  5363. }
  5364. vco = dev_priv->skl_preferred_vco_freq;
  5365. if (vco == 0)
  5366. vco = 8100000;
  5367. cdclk = skl_calc_cdclk(0, vco);
  5368. skl_set_cdclk(dev_priv, cdclk, vco);
  5369. }
  5370. static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  5371. {
  5372. uint32_t cdctl, expected;
  5373. /*
5374. * Check if the pre-OS initialized the display.
5375. * The pre-OS sets the SWF18 scratchpad register, which the OS
5376. * driver can use to check whether the display was brought up.
  5377. */
  5378. if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
  5379. goto sanitize;
  5380. intel_update_cdclk(dev_priv);
  5381. /* Is PLL enabled and locked ? */
  5382. if (dev_priv->cdclk_pll.vco == 0 ||
  5383. dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
  5384. goto sanitize;
  5385. /* DPLL okay; verify the cdclock
  5386. *
5387. * In some instances the frequency selection is correct but the
5388. * decimal part is programmed wrong by the BIOS when the pre-OS does
5389. * not enable the display. Verify that as well.
  5390. */
  5391. cdctl = I915_READ(CDCLK_CTL);
  5392. expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
  5393. skl_cdclk_decimal(dev_priv->cdclk_freq);
  5394. if (cdctl == expected)
  5395. /* All well; nothing to sanitize */
  5396. return;
  5397. sanitize:
  5398. DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
  5399. /* force cdclk programming */
  5400. dev_priv->cdclk_freq = 0;
  5401. /* force full PLL disable + enable */
  5402. dev_priv->cdclk_pll.vco = -1;
  5403. }
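/*
 * Note on the sentinels above: cdclk_freq == 0 defeats the early return in
 * skl_init_cdclk(), and cdclk_pll.vco == -1 can never match the requested
 * VCO, so skl_set_cdclk() will perform a full DPLL0 disable + enable.
 */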
  5404. /* Adjust CDclk dividers to allow high res or save power if possible */
  5405. static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
  5406. {
  5407. struct drm_i915_private *dev_priv = to_i915(dev);
  5408. u32 val, cmd;
  5409. WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
  5410. != dev_priv->cdclk_freq);
  5411. if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
  5412. cmd = 2;
  5413. else if (cdclk == 266667)
  5414. cmd = 1;
  5415. else
  5416. cmd = 0;
  5417. mutex_lock(&dev_priv->rps.hw_lock);
  5418. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  5419. val &= ~DSPFREQGUAR_MASK;
  5420. val |= (cmd << DSPFREQGUAR_SHIFT);
  5421. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  5422. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  5423. DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
  5424. 50)) {
  5425. DRM_ERROR("timed out waiting for CDclk change\n");
  5426. }
  5427. mutex_unlock(&dev_priv->rps.hw_lock);
  5428. mutex_lock(&dev_priv->sb_lock);
  5429. if (cdclk == 400000) {
  5430. u32 divider;
  5431. divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  5432. /* adjust cdclk divider */
  5433. val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
  5434. val &= ~CCK_FREQUENCY_VALUES;
  5435. val |= divider;
  5436. vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
  5437. if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
  5438. CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
  5439. 50))
  5440. DRM_ERROR("timed out waiting for CDclk change\n");
  5441. }
  5442. /* adjust self-refresh exit latency value */
  5443. val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
  5444. val &= ~0x7f;
  5445. /*
  5446. * For high bandwidth configs, we set a higher latency in the bunit
  5447. * so that the core display fetch happens in time to avoid underruns.
  5448. */
  5449. if (cdclk == 400000)
  5450. val |= 4500 / 250; /* 4.5 usec */
  5451. else
  5452. val |= 3000 / 250; /* 3.0 usec */
  5453. vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
  5454. mutex_unlock(&dev_priv->sb_lock);
  5455. intel_update_cdclk(dev_priv);
  5456. }
  5457. static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
  5458. {
  5459. struct drm_i915_private *dev_priv = to_i915(dev);
  5460. u32 val, cmd;
  5461. WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
  5462. != dev_priv->cdclk_freq);
  5463. switch (cdclk) {
  5464. case 333333:
  5465. case 320000:
  5466. case 266667:
  5467. case 200000:
  5468. break;
  5469. default:
  5470. MISSING_CASE(cdclk);
  5471. return;
  5472. }
  5473. /*
  5474. * Specs are full of misinformation, but testing on actual
  5475. * hardware has shown that we just need to write the desired
  5476. * CCK divider into the Punit register.
  5477. */
  5478. cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  5479. mutex_lock(&dev_priv->rps.hw_lock);
  5480. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  5481. val &= ~DSPFREQGUAR_MASK_CHV;
  5482. val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
  5483. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  5484. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  5485. DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
  5486. 50)) {
  5487. DRM_ERROR("timed out waiting for CDclk change\n");
  5488. }
  5489. mutex_unlock(&dev_priv->rps.hw_lock);
  5490. intel_update_cdclk(dev_priv);
  5491. }
  5492. static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
  5493. int max_pixclk)
  5494. {
  5495. int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
  5496. int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
  5497. /*
  5498. * Really only a few cases to deal with, as only 4 CDclks are supported:
  5499. * 200MHz
  5500. * 267MHz
  5501. * 320/333MHz (depends on HPLL freq)
  5502. * 400MHz (VLV only)
  5503. * So we check to see whether we're above 90% (VLV) or 95% (CHV)
  5504. * of the lower bin and adjust if needed.
  5505. *
  5506. * We seem to get an unstable or solid color picture at 200MHz.
  5507. * Not sure what's wrong. For now use 200MHz only when all pipes
  5508. * are off.
  5509. */
  5510. if (!IS_CHERRYVIEW(dev_priv) &&
  5511. max_pixclk > freq_320*limit/100)
  5512. return 400000;
  5513. else if (max_pixclk > 266667*limit/100)
  5514. return freq_320;
  5515. else if (max_pixclk > 0)
  5516. return 266667;
  5517. else
  5518. return 200000;
  5519. }
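/*
 * Worked example (illustrative values): on VLV with freq_320 == 320000 the
 * guardband limit is 90%, so a max_pixclk of 300000 exceeds
 * 320000 * 90 / 100 == 288000 and bumps cdclk to 400000, while 250000 stays
 * within it and only needs the 320/333 MHz bin.
 */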
  5520. static int bxt_calc_cdclk(int max_pixclk)
  5521. {
  5522. if (max_pixclk > 576000)
  5523. return 624000;
  5524. else if (max_pixclk > 384000)
  5525. return 576000;
  5526. else if (max_pixclk > 288000)
  5527. return 384000;
  5528. else if (max_pixclk > 144000)
  5529. return 288000;
  5530. else
  5531. return 144000;
  5532. }
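/*
 * Illustrative example: a 400000 kHz max pixel clock falls into the
 * "> 384000" bin above and therefore selects a 576000 kHz cdclk.
 */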
5533. /* Compute the max pixel clock for the new configuration. */
  5534. static int intel_mode_max_pixclk(struct drm_device *dev,
  5535. struct drm_atomic_state *state)
  5536. {
  5537. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  5538. struct drm_i915_private *dev_priv = to_i915(dev);
  5539. struct drm_crtc *crtc;
  5540. struct drm_crtc_state *crtc_state;
  5541. unsigned max_pixclk = 0, i;
  5542. enum pipe pipe;
  5543. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  5544. sizeof(intel_state->min_pixclk));
  5545. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  5546. int pixclk = 0;
  5547. if (crtc_state->enable)
  5548. pixclk = crtc_state->adjusted_mode.crtc_clock;
  5549. intel_state->min_pixclk[i] = pixclk;
  5550. }
  5551. for_each_pipe(dev_priv, pipe)
  5552. max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
  5553. return max_pixclk;
  5554. }
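/*
 * Note: the per-pipe minimums are seeded from the current dev_priv state and
 * only the CRTCs present in this atomic state are overwritten, so pipes
 * untouched by the update still constrain the result when the maximum is
 * taken across all pipes.
 */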
  5555. static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
  5556. {
  5557. struct drm_device *dev = state->dev;
  5558. struct drm_i915_private *dev_priv = to_i915(dev);
  5559. int max_pixclk = intel_mode_max_pixclk(dev, state);
  5560. struct intel_atomic_state *intel_state =
  5561. to_intel_atomic_state(state);
  5562. intel_state->cdclk = intel_state->dev_cdclk =
  5563. valleyview_calc_cdclk(dev_priv, max_pixclk);
  5564. if (!intel_state->active_crtcs)
  5565. intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
  5566. return 0;
  5567. }
  5568. static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
  5569. {
  5570. int max_pixclk = ilk_max_pixel_rate(state);
  5571. struct intel_atomic_state *intel_state =
  5572. to_intel_atomic_state(state);
  5573. intel_state->cdclk = intel_state->dev_cdclk =
  5574. bxt_calc_cdclk(max_pixclk);
  5575. if (!intel_state->active_crtcs)
  5576. intel_state->dev_cdclk = bxt_calc_cdclk(0);
  5577. return 0;
  5578. }
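/*
 * In both *_modeset_calc_cdclk() helpers above, intel_state->cdclk is the
 * frequency required by the new configuration while dev_cdclk is what
 * actually gets programmed; when no CRTCs remain active dev_cdclk is dropped
 * to the minimum (max_pixclk == 0) value to save power.
 */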
  5579. static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
  5580. {
  5581. unsigned int credits, default_credits;
  5582. if (IS_CHERRYVIEW(dev_priv))
  5583. default_credits = PFI_CREDIT(12);
  5584. else
  5585. default_credits = PFI_CREDIT(8);
  5586. if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
  5587. /* CHV suggested value is 31 or 63 */
  5588. if (IS_CHERRYVIEW(dev_priv))
  5589. credits = PFI_CREDIT_63;
  5590. else
  5591. credits = PFI_CREDIT(15);
  5592. } else {
  5593. credits = default_credits;
  5594. }
  5595. /*
  5596. * WA - write default credits before re-programming
  5597. * FIXME: should we also set the resend bit here?
  5598. */
  5599. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5600. default_credits);
  5601. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5602. credits | PFI_CREDIT_RESEND);
  5603. /*
  5604. * FIXME is this guaranteed to clear
  5605. * immediately or should we poll for it?
  5606. */
  5607. WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
  5608. }
  5609. static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  5610. {
  5611. struct drm_device *dev = old_state->dev;
  5612. struct drm_i915_private *dev_priv = to_i915(dev);
  5613. struct intel_atomic_state *old_intel_state =
  5614. to_intel_atomic_state(old_state);
  5615. unsigned req_cdclk = old_intel_state->dev_cdclk;
  5616. /*
  5617. * FIXME: We can end up here with all power domains off, yet
  5618. * with a CDCLK frequency other than the minimum. To account
  5619. * for this take the PIPE-A power domain, which covers the HW
  5620. * blocks needed for the following programming. This can be
  5621. * removed once it's guaranteed that we get here either with
  5622. * the minimum CDCLK set, or the required power domains
  5623. * enabled.
  5624. */
  5625. intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
  5626. if (IS_CHERRYVIEW(dev_priv))
  5627. cherryview_set_cdclk(dev, req_cdclk);
  5628. else
  5629. valleyview_set_cdclk(dev, req_cdclk);
  5630. vlv_program_pfi_credits(dev_priv);
  5631. intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
  5632. }
  5633. static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
  5634. struct drm_atomic_state *old_state)
  5635. {
  5636. struct drm_crtc *crtc = pipe_config->base.crtc;
  5637. struct drm_device *dev = crtc->dev;
  5638. struct drm_i915_private *dev_priv = to_i915(dev);
  5639. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5640. int pipe = intel_crtc->pipe;
  5641. if (WARN_ON(intel_crtc->active))
  5642. return;
  5643. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  5644. intel_dp_set_m_n(intel_crtc, M1_N1);
  5645. intel_set_pipe_timings(intel_crtc);
  5646. intel_set_pipe_src_size(intel_crtc);
  5647. if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
  5648. struct drm_i915_private *dev_priv = to_i915(dev);
  5649. I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
  5650. I915_WRITE(CHV_CANVAS(pipe), 0);
  5651. }
  5652. i9xx_set_pipeconf(intel_crtc);
  5653. intel_crtc->active = true;
  5654. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5655. intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  5656. if (IS_CHERRYVIEW(dev_priv)) {
  5657. chv_prepare_pll(intel_crtc, intel_crtc->config);
  5658. chv_enable_pll(intel_crtc, intel_crtc->config);
  5659. } else {
  5660. vlv_prepare_pll(intel_crtc, intel_crtc->config);
  5661. vlv_enable_pll(intel_crtc, intel_crtc->config);
  5662. }
  5663. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  5664. i9xx_pfit_enable(intel_crtc);
  5665. intel_color_load_luts(&pipe_config->base);
  5666. intel_update_watermarks(intel_crtc);
  5667. intel_enable_pipe(intel_crtc);
  5668. assert_vblank_disabled(crtc);
  5669. drm_crtc_vblank_on(crtc);
  5670. intel_encoders_enable(crtc, pipe_config, old_state);
  5671. }
  5672. static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
  5673. {
  5674. struct drm_device *dev = crtc->base.dev;
  5675. struct drm_i915_private *dev_priv = to_i915(dev);
  5676. I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
  5677. I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
  5678. }
  5679. static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
  5680. struct drm_atomic_state *old_state)
  5681. {
  5682. struct drm_crtc *crtc = pipe_config->base.crtc;
  5683. struct drm_device *dev = crtc->dev;
  5684. struct drm_i915_private *dev_priv = to_i915(dev);
  5685. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5686. enum pipe pipe = intel_crtc->pipe;
  5687. if (WARN_ON(intel_crtc->active))
  5688. return;
  5689. i9xx_set_pll_dividers(intel_crtc);
  5690. if (intel_crtc_has_dp_encoder(intel_crtc->config))
  5691. intel_dp_set_m_n(intel_crtc, M1_N1);
  5692. intel_set_pipe_timings(intel_crtc);
  5693. intel_set_pipe_src_size(intel_crtc);
  5694. i9xx_set_pipeconf(intel_crtc);
  5695. intel_crtc->active = true;
  5696. if (!IS_GEN2(dev_priv))
  5697. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5698. intel_encoders_pre_enable(crtc, pipe_config, old_state);
  5699. i9xx_enable_pll(intel_crtc);
  5700. i9xx_pfit_enable(intel_crtc);
  5701. intel_color_load_luts(&pipe_config->base);
  5702. intel_update_watermarks(intel_crtc);
  5703. intel_enable_pipe(intel_crtc);
  5704. assert_vblank_disabled(crtc);
  5705. drm_crtc_vblank_on(crtc);
  5706. intel_encoders_enable(crtc, pipe_config, old_state);
  5707. }
  5708. static void i9xx_pfit_disable(struct intel_crtc *crtc)
  5709. {
  5710. struct drm_device *dev = crtc->base.dev;
  5711. struct drm_i915_private *dev_priv = to_i915(dev);
  5712. if (!crtc->config->gmch_pfit.control)
  5713. return;
  5714. assert_pipe_disabled(dev_priv, crtc->pipe);
  5715. DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
  5716. I915_READ(PFIT_CONTROL));
  5717. I915_WRITE(PFIT_CONTROL, 0);
  5718. }
  5719. static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
  5720. struct drm_atomic_state *old_state)
  5721. {
  5722. struct drm_crtc *crtc = old_crtc_state->base.crtc;
  5723. struct drm_device *dev = crtc->dev;
  5724. struct drm_i915_private *dev_priv = to_i915(dev);
  5725. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5726. int pipe = intel_crtc->pipe;
  5727. /*
  5728. * On gen2 planes are double buffered but the pipe isn't, so we must
  5729. * wait for planes to fully turn off before disabling the pipe.
  5730. */
  5731. if (IS_GEN2(dev_priv))
  5732. intel_wait_for_vblank(dev_priv, pipe);
  5733. intel_encoders_disable(crtc, old_crtc_state, old_state);
  5734. drm_crtc_vblank_off(crtc);
  5735. assert_vblank_disabled(crtc);
  5736. intel_disable_pipe(intel_crtc);
  5737. i9xx_pfit_disable(intel_crtc);
  5738. intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  5739. if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
  5740. if (IS_CHERRYVIEW(dev_priv))
  5741. chv_disable_pll(dev_priv, pipe);
  5742. else if (IS_VALLEYVIEW(dev_priv))
  5743. vlv_disable_pll(dev_priv, pipe);
  5744. else
  5745. i9xx_disable_pll(intel_crtc);
  5746. }
  5747. intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
  5748. if (!IS_GEN2(dev_priv))
  5749. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  5750. }
  5751. static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
  5752. {
  5753. struct intel_encoder *encoder;
  5754. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5755. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  5756. enum intel_display_power_domain domain;
  5757. unsigned long domains;
  5758. struct drm_atomic_state *state;
  5759. struct intel_crtc_state *crtc_state;
  5760. int ret;
  5761. if (!intel_crtc->active)
  5762. return;
  5763. if (to_intel_plane_state(crtc->primary->state)->base.visible) {
  5764. WARN_ON(intel_crtc->flip_work);
  5765. intel_pre_disable_primary_noatomic(crtc);
  5766. intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
  5767. to_intel_plane_state(crtc->primary->state)->base.visible = false;
  5768. }
  5769. state = drm_atomic_state_alloc(crtc->dev);
  5770. state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
  5771. /* Everything's already locked, -EDEADLK can't happen. */
  5772. crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
  5773. ret = drm_atomic_add_affected_connectors(state, crtc);
  5774. WARN_ON(IS_ERR(crtc_state) || ret);
  5775. dev_priv->display.crtc_disable(crtc_state, state);
  5776. drm_atomic_state_put(state);
  5777. DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
  5778. crtc->base.id, crtc->name);
  5779. WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
  5780. crtc->state->active = false;
  5781. intel_crtc->active = false;
  5782. crtc->enabled = false;
  5783. crtc->state->connector_mask = 0;
  5784. crtc->state->encoder_mask = 0;
  5785. for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
  5786. encoder->base.crtc = NULL;
  5787. intel_fbc_disable(intel_crtc);
  5788. intel_update_watermarks(intel_crtc);
  5789. intel_disable_shared_dpll(intel_crtc);
  5790. domains = intel_crtc->enabled_power_domains;
  5791. for_each_power_domain(domain, domains)
  5792. intel_display_power_put(dev_priv, domain);
  5793. intel_crtc->enabled_power_domains = 0;
  5794. dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
  5795. dev_priv->min_pixclk[intel_crtc->pipe] = 0;
  5796. }
  5797. /*
5798. * Turn all CRTCs off, but do not adjust state.
  5799. * This has to be paired with a call to intel_modeset_setup_hw_state.
  5800. */
  5801. int intel_display_suspend(struct drm_device *dev)
  5802. {
  5803. struct drm_i915_private *dev_priv = to_i915(dev);
  5804. struct drm_atomic_state *state;
  5805. int ret;
  5806. state = drm_atomic_helper_suspend(dev);
  5807. ret = PTR_ERR_OR_ZERO(state);
  5808. if (ret)
  5809. DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  5810. else
  5811. dev_priv->modeset_restore_state = state;
  5812. return ret;
  5813. }
  5814. void intel_encoder_destroy(struct drm_encoder *encoder)
  5815. {
  5816. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  5817. drm_encoder_cleanup(encoder);
  5818. kfree(intel_encoder);
  5819. }
5820. /* Cross check the actual hw state with our own modeset state tracking (and its
  5821. * internal consistency). */
  5822. static void intel_connector_verify_state(struct intel_connector *connector)
  5823. {
  5824. struct drm_crtc *crtc = connector->base.state->crtc;
  5825. DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  5826. connector->base.base.id,
  5827. connector->base.name);
  5828. if (connector->get_hw_state(connector)) {
  5829. struct intel_encoder *encoder = connector->encoder;
  5830. struct drm_connector_state *conn_state = connector->base.state;
  5831. I915_STATE_WARN(!crtc,
  5832. "connector enabled without attached crtc\n");
  5833. if (!crtc)
  5834. return;
  5835. I915_STATE_WARN(!crtc->state->active,
  5836. "connector is active, but attached crtc isn't\n");
  5837. if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
  5838. return;
  5839. I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
  5840. "atomic encoder doesn't match attached encoder\n");
  5841. I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
  5842. "attached encoder crtc differs from connector crtc\n");
  5843. } else {
  5844. I915_STATE_WARN(crtc && crtc->state->active,
  5845. "attached crtc is active, but connector isn't\n");
  5846. I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
  5847. "best encoder set without crtc!\n");
  5848. }
  5849. }
  5850. int intel_connector_init(struct intel_connector *connector)
  5851. {
  5852. drm_atomic_helper_connector_reset(&connector->base);
  5853. if (!connector->base.state)
  5854. return -ENOMEM;
  5855. return 0;
  5856. }
  5857. struct intel_connector *intel_connector_alloc(void)
  5858. {
  5859. struct intel_connector *connector;
  5860. connector = kzalloc(sizeof *connector, GFP_KERNEL);
  5861. if (!connector)
  5862. return NULL;
  5863. if (intel_connector_init(connector) < 0) {
  5864. kfree(connector);
  5865. return NULL;
  5866. }
  5867. return connector;
  5868. }
  5869. /* Simple connector->get_hw_state implementation for encoders that support only
5870. * one connector and no cloning, and hence the encoder state determines the state
  5871. * of the connector. */
  5872. bool intel_connector_get_hw_state(struct intel_connector *connector)
  5873. {
  5874. enum pipe pipe = 0;
  5875. struct intel_encoder *encoder = connector->encoder;
  5876. return encoder->get_hw_state(encoder, &pipe);
  5877. }
  5878. static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  5879. {
  5880. if (crtc_state->base.enable && crtc_state->has_pch_encoder)
  5881. return crtc_state->fdi_lanes;
  5882. return 0;
  5883. }
  5884. static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  5885. struct intel_crtc_state *pipe_config)
  5886. {
  5887. struct drm_i915_private *dev_priv = to_i915(dev);
  5888. struct drm_atomic_state *state = pipe_config->base.state;
  5889. struct intel_crtc *other_crtc;
  5890. struct intel_crtc_state *other_crtc_state;
  5891. DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
  5892. pipe_name(pipe), pipe_config->fdi_lanes);
  5893. if (pipe_config->fdi_lanes > 4) {
  5894. DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
  5895. pipe_name(pipe), pipe_config->fdi_lanes);
  5896. return -EINVAL;
  5897. }
  5898. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  5899. if (pipe_config->fdi_lanes > 2) {
  5900. DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
  5901. pipe_config->fdi_lanes);
  5902. return -EINVAL;
  5903. } else {
  5904. return 0;
  5905. }
  5906. }
  5907. if (INTEL_INFO(dev_priv)->num_pipes == 2)
  5908. return 0;
  5909. /* Ivybridge 3 pipe is really complicated */
  5910. switch (pipe) {
  5911. case PIPE_A:
  5912. return 0;
  5913. case PIPE_B:
  5914. if (pipe_config->fdi_lanes <= 2)
  5915. return 0;
  5916. other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
  5917. other_crtc_state =
  5918. intel_atomic_get_crtc_state(state, other_crtc);
  5919. if (IS_ERR(other_crtc_state))
  5920. return PTR_ERR(other_crtc_state);
  5921. if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
  5922. DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
  5923. pipe_name(pipe), pipe_config->fdi_lanes);
  5924. return -EINVAL;
  5925. }
  5926. return 0;
  5927. case PIPE_C:
  5928. if (pipe_config->fdi_lanes > 2) {
  5929. DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
  5930. pipe_name(pipe), pipe_config->fdi_lanes);
  5931. return -EINVAL;
  5932. }
  5933. other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
  5934. other_crtc_state =
  5935. intel_atomic_get_crtc_state(state, other_crtc);
  5936. if (IS_ERR(other_crtc_state))
  5937. return PTR_ERR(other_crtc_state);
  5938. if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
  5939. DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
  5940. return -EINVAL;
  5941. }
  5942. return 0;
  5943. default:
  5944. BUG();
  5945. }
  5946. }
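/*
 * Summary of the IVB constraints above: pipes B and C share the FDI link, so
 * pipe C is limited to 2 lanes, and pipe B may use more than 2 lanes only
 * while pipe C does not require any FDI lanes at all.
 */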
  5947. #define RETRY 1
  5948. static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
  5949. struct intel_crtc_state *pipe_config)
  5950. {
  5951. struct drm_device *dev = intel_crtc->base.dev;
  5952. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  5953. int lane, link_bw, fdi_dotclock, ret;
  5954. bool needs_recompute = false;
  5955. retry:
  5956. /* FDI is a binary signal running at ~2.7GHz, encoding
  5957. * each output octet as 10 bits. The actual frequency
  5958. * is stored as a divider into a 100MHz clock, and the
  5959. * mode pixel clock is stored in units of 1KHz.
  5960. * Hence the bw of each lane in terms of the mode signal
  5961. * is:
  5962. */
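/*
 * Rough, illustrative arithmetic (the helper below may add extra margin on
 * top of this): a 148500 kHz 1080p dotclock at 24 bpp over a 270000 kHz FDI
 * link needs about 148500 * 24 / (270000 * 8) ~= 1.65, i.e. 2 lanes.
 */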
  5963. link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
  5964. fdi_dotclock = adjusted_mode->crtc_clock;
  5965. lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
  5966. pipe_config->pipe_bpp);
  5967. pipe_config->fdi_lanes = lane;
  5968. intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
  5969. link_bw, &pipe_config->fdi_m_n);
  5970. ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
  5971. if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
  5972. pipe_config->pipe_bpp -= 2*3;
  5973. DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
  5974. pipe_config->pipe_bpp);
  5975. needs_recompute = true;
  5976. pipe_config->bw_constrained = true;
  5977. goto retry;
  5978. }
  5979. if (needs_recompute)
  5980. return RETRY;
  5981. return ret;
  5982. }
  5983. static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
  5984. struct intel_crtc_state *pipe_config)
  5985. {
  5986. if (pipe_config->pipe_bpp > 24)
  5987. return false;
  5988. /* HSW can handle pixel rate up to cdclk? */
  5989. if (IS_HASWELL(dev_priv))
  5990. return true;
  5991. /*
  5992. * We compare against max which means we must take
  5993. * the increased cdclk requirement into account when
  5994. * calculating the new cdclk.
  5995. *
5996. * Should measure whether using a lower cdclk w/o IPS would be preferable.
  5997. */
  5998. return ilk_pipe_pixel_rate(pipe_config) <=
  5999. dev_priv->max_cdclk_freq * 95 / 100;
  6000. }
  6001. static void hsw_compute_ips_config(struct intel_crtc *crtc,
  6002. struct intel_crtc_state *pipe_config)
  6003. {
  6004. struct drm_device *dev = crtc->base.dev;
  6005. struct drm_i915_private *dev_priv = to_i915(dev);
  6006. pipe_config->ips_enabled = i915.enable_ips &&
  6007. hsw_crtc_supports_ips(crtc) &&
  6008. pipe_config_supports_ips(dev_priv, pipe_config);
  6009. }
  6010. static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
  6011. {
  6012. const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6013. /* GDG double wide on either pipe, otherwise pipe A only */
  6014. return INTEL_INFO(dev_priv)->gen < 4 &&
  6015. (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
  6016. }
  6017. static int intel_crtc_compute_config(struct intel_crtc *crtc,
  6018. struct intel_crtc_state *pipe_config)
  6019. {
  6020. struct drm_device *dev = crtc->base.dev;
  6021. struct drm_i915_private *dev_priv = to_i915(dev);
  6022. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  6023. int clock_limit = dev_priv->max_dotclk_freq;
  6024. if (INTEL_GEN(dev_priv) < 4) {
  6025. clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
  6026. /*
  6027. * Enable double wide mode when the dot clock
  6028. * is > 90% of the (display) core speed.
  6029. */
  6030. if (intel_crtc_supports_double_wide(crtc) &&
  6031. adjusted_mode->crtc_clock > clock_limit) {
  6032. clock_limit = dev_priv->max_dotclk_freq;
  6033. pipe_config->double_wide = true;
  6034. }
  6035. }
  6036. if (adjusted_mode->crtc_clock > clock_limit) {
  6037. DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
  6038. adjusted_mode->crtc_clock, clock_limit,
  6039. yesno(pipe_config->double_wide));
  6040. return -EINVAL;
  6041. }
  6042. /*
  6043. * Pipe horizontal size must be even in:
  6044. * - DVO ganged mode
  6045. * - LVDS dual channel mode
  6046. * - Double wide pipe
  6047. */
  6048. if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
  6049. intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
  6050. pipe_config->pipe_src_w &= ~1;
  6051. /* Cantiga+ cannot handle modes with a hsync front porch of 0.
  6052. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
  6053. */
  6054. if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
  6055. adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
  6056. return -EINVAL;
  6057. if (HAS_IPS(dev_priv))
  6058. hsw_compute_ips_config(crtc, pipe_config);
  6059. if (pipe_config->has_pch_encoder)
  6060. return ironlake_fdi_compute_config(crtc, pipe_config);
  6061. return 0;
  6062. }
  6063. static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6064. {
  6065. u32 cdctl;
  6066. skl_dpll0_update(dev_priv);
  6067. if (dev_priv->cdclk_pll.vco == 0)
  6068. return dev_priv->cdclk_pll.ref;
  6069. cdctl = I915_READ(CDCLK_CTL);
  6070. if (dev_priv->cdclk_pll.vco == 8640000) {
  6071. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  6072. case CDCLK_FREQ_450_432:
  6073. return 432000;
  6074. case CDCLK_FREQ_337_308:
  6075. return 308571;
  6076. case CDCLK_FREQ_540:
  6077. return 540000;
  6078. case CDCLK_FREQ_675_617:
  6079. return 617143;
  6080. default:
  6081. MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
  6082. }
  6083. } else {
  6084. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  6085. case CDCLK_FREQ_450_432:
  6086. return 450000;
  6087. case CDCLK_FREQ_337_308:
  6088. return 337500;
  6089. case CDCLK_FREQ_540:
  6090. return 540000;
  6091. case CDCLK_FREQ_675_617:
  6092. return 675000;
  6093. default:
  6094. MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
  6095. }
  6096. }
  6097. return dev_priv->cdclk_pll.ref;
  6098. }
  6099. static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
  6100. {
  6101. u32 val;
  6102. dev_priv->cdclk_pll.ref = 19200;
  6103. dev_priv->cdclk_pll.vco = 0;
  6104. val = I915_READ(BXT_DE_PLL_ENABLE);
  6105. if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
  6106. return;
  6107. if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
  6108. return;
  6109. val = I915_READ(BXT_DE_PLL_CTL);
  6110. dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
  6111. dev_priv->cdclk_pll.ref;
  6112. }
  6113. static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6114. {
  6115. u32 divider;
  6116. int div, vco;
  6117. bxt_de_pll_update(dev_priv);
  6118. vco = dev_priv->cdclk_pll.vco;
  6119. if (vco == 0)
  6120. return dev_priv->cdclk_pll.ref;
  6121. divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
  6122. switch (divider) {
  6123. case BXT_CDCLK_CD2X_DIV_SEL_1:
  6124. div = 2;
  6125. break;
  6126. case BXT_CDCLK_CD2X_DIV_SEL_1_5:
  6127. div = 3;
  6128. break;
  6129. case BXT_CDCLK_CD2X_DIV_SEL_2:
  6130. div = 4;
  6131. break;
  6132. case BXT_CDCLK_CD2X_DIV_SEL_4:
  6133. div = 8;
  6134. break;
  6135. default:
  6136. MISSING_CASE(divider);
  6137. return dev_priv->cdclk_pll.ref;
  6138. }
  6139. return DIV_ROUND_CLOSEST(vco, div);
  6140. }
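/*
 * The div values above are twice the CD2X divider named in each field
 * (cdclk = vco / (2 * cd2x)). Illustrative example: a 1152000 kHz DE PLL VCO
 * with BXT_CDCLK_CD2X_DIV_SEL_2 (div == 4) yields 288000 kHz.
 */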
  6141. static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6142. {
  6143. uint32_t lcpll = I915_READ(LCPLL_CTL);
  6144. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  6145. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  6146. return 800000;
  6147. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  6148. return 450000;
  6149. else if (freq == LCPLL_CLK_FREQ_450)
  6150. return 450000;
  6151. else if (freq == LCPLL_CLK_FREQ_54O_BDW)
  6152. return 540000;
  6153. else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
  6154. return 337500;
  6155. else
  6156. return 675000;
  6157. }
  6158. static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6159. {
  6160. uint32_t lcpll = I915_READ(LCPLL_CTL);
  6161. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  6162. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  6163. return 800000;
  6164. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  6165. return 450000;
  6166. else if (freq == LCPLL_CLK_FREQ_450)
  6167. return 450000;
  6168. else if (IS_HSW_ULT(dev_priv))
  6169. return 337500;
  6170. else
  6171. return 540000;
  6172. }
  6173. static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6174. {
  6175. return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
  6176. CCK_DISPLAY_CLOCK_CONTROL);
  6177. }
  6178. static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6179. {
  6180. return 450000;
  6181. }
  6182. static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6183. {
  6184. return 400000;
  6185. }
  6186. static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6187. {
  6188. return 333333;
  6189. }
  6190. static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6191. {
  6192. return 200000;
  6193. }
  6194. static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6195. {
  6196. struct pci_dev *pdev = dev_priv->drm.pdev;
  6197. u16 gcfgc = 0;
  6198. pci_read_config_word(pdev, GCFGC, &gcfgc);
  6199. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  6200. case GC_DISPLAY_CLOCK_267_MHZ_PNV:
  6201. return 266667;
  6202. case GC_DISPLAY_CLOCK_333_MHZ_PNV:
  6203. return 333333;
  6204. case GC_DISPLAY_CLOCK_444_MHZ_PNV:
  6205. return 444444;
  6206. case GC_DISPLAY_CLOCK_200_MHZ_PNV:
  6207. return 200000;
  6208. default:
  6209. DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
  6210. case GC_DISPLAY_CLOCK_133_MHZ_PNV:
  6211. return 133333;
  6212. case GC_DISPLAY_CLOCK_167_MHZ_PNV:
  6213. return 166667;
  6214. }
  6215. }
  6216. static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6217. {
  6218. struct pci_dev *pdev = dev_priv->drm.pdev;
  6219. u16 gcfgc = 0;
  6220. pci_read_config_word(pdev, GCFGC, &gcfgc);
  6221. if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
  6222. return 133333;
  6223. else {
  6224. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  6225. case GC_DISPLAY_CLOCK_333_MHZ:
  6226. return 333333;
  6227. default:
  6228. case GC_DISPLAY_CLOCK_190_200_MHZ:
  6229. return 190000;
  6230. }
  6231. }
  6232. }
  6233. static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6234. {
  6235. return 266667;
  6236. }
  6237. static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6238. {
  6239. struct pci_dev *pdev = dev_priv->drm.pdev;
  6240. u16 hpllcc = 0;
  6241. /*
  6242. * 852GM/852GMV only supports 133 MHz and the HPLLCC
  6243. * encoding is different :(
  6244. * FIXME is this the right way to detect 852GM/852GMV?
  6245. */
  6246. if (pdev->revision == 0x1)
  6247. return 133333;
  6248. pci_bus_read_config_word(pdev->bus,
  6249. PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
  6250. /* Assume that the hardware is in the high speed state. This
  6251. * should be the default.
  6252. */
  6253. switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
  6254. case GC_CLOCK_133_200:
  6255. case GC_CLOCK_133_200_2:
  6256. case GC_CLOCK_100_200:
  6257. return 200000;
  6258. case GC_CLOCK_166_250:
  6259. return 250000;
  6260. case GC_CLOCK_100_133:
  6261. return 133333;
  6262. case GC_CLOCK_133_266:
  6263. case GC_CLOCK_133_266_2:
  6264. case GC_CLOCK_166_266:
  6265. return 266667;
  6266. }
  6267. /* Shouldn't happen */
  6268. return 0;
  6269. }
  6270. static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6271. {
  6272. return 133333;
  6273. }
  6274. static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
  6275. {
  6276. static const unsigned int blb_vco[8] = {
  6277. [0] = 3200000,
  6278. [1] = 4000000,
  6279. [2] = 5333333,
  6280. [3] = 4800000,
  6281. [4] = 6400000,
  6282. };
  6283. static const unsigned int pnv_vco[8] = {
  6284. [0] = 3200000,
  6285. [1] = 4000000,
  6286. [2] = 5333333,
  6287. [3] = 4800000,
  6288. [4] = 2666667,
  6289. };
  6290. static const unsigned int cl_vco[8] = {
  6291. [0] = 3200000,
  6292. [1] = 4000000,
  6293. [2] = 5333333,
  6294. [3] = 6400000,
  6295. [4] = 3333333,
  6296. [5] = 3566667,
  6297. [6] = 4266667,
  6298. };
  6299. static const unsigned int elk_vco[8] = {
  6300. [0] = 3200000,
  6301. [1] = 4000000,
  6302. [2] = 5333333,
  6303. [3] = 4800000,
  6304. };
  6305. static const unsigned int ctg_vco[8] = {
  6306. [0] = 3200000,
  6307. [1] = 4000000,
  6308. [2] = 5333333,
  6309. [3] = 6400000,
  6310. [4] = 2666667,
  6311. [5] = 4266667,
  6312. };
  6313. const unsigned int *vco_table;
  6314. unsigned int vco;
  6315. uint8_t tmp = 0;
  6316. /* FIXME other chipsets? */
  6317. if (IS_GM45(dev_priv))
  6318. vco_table = ctg_vco;
  6319. else if (IS_G4X(dev_priv))
  6320. vco_table = elk_vco;
  6321. else if (IS_CRESTLINE(dev_priv))
  6322. vco_table = cl_vco;
  6323. else if (IS_PINEVIEW(dev_priv))
  6324. vco_table = pnv_vco;
  6325. else if (IS_G33(dev_priv))
  6326. vco_table = blb_vco;
  6327. else
  6328. return 0;
  6329. tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
  6330. vco = vco_table[tmp & 0x7];
  6331. if (vco == 0)
  6332. DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
  6333. else
  6334. DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
  6335. return vco;
  6336. }
  6337. static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6338. {
  6339. struct pci_dev *pdev = dev_priv->drm.pdev;
  6340. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6341. uint16_t tmp = 0;
  6342. pci_read_config_word(pdev, GCFGC, &tmp);
  6343. cdclk_sel = (tmp >> 12) & 0x1;
  6344. switch (vco) {
  6345. case 2666667:
  6346. case 4000000:
  6347. case 5333333:
  6348. return cdclk_sel ? 333333 : 222222;
  6349. case 3200000:
  6350. return cdclk_sel ? 320000 : 228571;
  6351. default:
  6352. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
  6353. return 222222;
  6354. }
  6355. }
  6356. static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6357. {
  6358. struct pci_dev *pdev = dev_priv->drm.pdev;
  6359. static const uint8_t div_3200[] = { 16, 10, 8 };
  6360. static const uint8_t div_4000[] = { 20, 12, 10 };
  6361. static const uint8_t div_5333[] = { 24, 16, 14 };
  6362. const uint8_t *div_table;
  6363. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6364. uint16_t tmp = 0;
  6365. pci_read_config_word(pdev, GCFGC, &tmp);
  6366. cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
  6367. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  6368. goto fail;
  6369. switch (vco) {
  6370. case 3200000:
  6371. div_table = div_3200;
  6372. break;
  6373. case 4000000:
  6374. div_table = div_4000;
  6375. break;
  6376. case 5333333:
  6377. div_table = div_5333;
  6378. break;
  6379. default:
  6380. goto fail;
  6381. }
  6382. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  6383. fail:
  6384. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
  6385. return 200000;
  6386. }
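/*
 * Illustrative example: with a 3200000 kHz HPLL VCO and cdclk_sel == 0 the
 * table above gives a divider of 16, i.e. a 200000 kHz display clock.
 */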
  6387. static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
  6388. {
  6389. struct pci_dev *pdev = dev_priv->drm.pdev;
  6390. static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
  6391. static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
  6392. static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
  6393. static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
  6394. const uint8_t *div_table;
  6395. unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
  6396. uint16_t tmp = 0;
  6397. pci_read_config_word(pdev, GCFGC, &tmp);
  6398. cdclk_sel = (tmp >> 4) & 0x7;
  6399. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  6400. goto fail;
  6401. switch (vco) {
  6402. case 3200000:
  6403. div_table = div_3200;
  6404. break;
  6405. case 4000000:
  6406. div_table = div_4000;
  6407. break;
  6408. case 4800000:
  6409. div_table = div_4800;
  6410. break;
  6411. case 5333333:
  6412. div_table = div_5333;
  6413. break;
  6414. default:
  6415. goto fail;
  6416. }
  6417. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  6418. fail:
  6419. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
  6420. return 190476;
  6421. }
  6422. static void
  6423. intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
  6424. {
  6425. while (*num > DATA_LINK_M_N_MASK ||
  6426. *den > DATA_LINK_M_N_MASK) {
  6427. *num >>= 1;
  6428. *den >>= 1;
  6429. }
  6430. }
  6431. static void compute_m_n(unsigned int m, unsigned int n,
  6432. uint32_t *ret_m, uint32_t *ret_n)
  6433. {
  6434. *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
  6435. *ret_m = div_u64((uint64_t) m * *ret_n, n);
  6436. intel_reduce_m_n_ratio(ret_m, ret_n);
  6437. }
  6438. void
  6439. intel_link_compute_m_n(int bits_per_pixel, int nlanes,
  6440. int pixel_clock, int link_clock,
  6441. struct intel_link_m_n *m_n)
  6442. {
  6443. m_n->tu = 64;
  6444. compute_m_n(bits_per_pixel * pixel_clock,
  6445. link_clock * nlanes * 8,
  6446. &m_n->gmch_m, &m_n->gmch_n);
  6447. compute_m_n(pixel_clock, link_clock,
  6448. &m_n->link_m, &m_n->link_n);
  6449. }
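/*
 * Illustrative example: 24 bpp at a 148500 kHz dotclock over 4 lanes of a
 * 270000 kHz link gives a data M/N ratio of 24 * 148500 : 4 * 8 * 270000,
 * i.e. 3564000 : 8640000 ~= 0.4125; compute_m_n() then rescales the pair so
 * N is a power of two capped at DATA_LINK_N_MAX, and
 * intel_reduce_m_n_ratio() halves both values until they fit the M/N mask.
 */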
  6450. static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  6451. {
  6452. if (i915.panel_use_ssc >= 0)
  6453. return i915.panel_use_ssc != 0;
  6454. return dev_priv->vbt.lvds_use_ssc
  6455. && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  6456. }
  6457. static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
  6458. {
  6459. return (1 << dpll->n) << 16 | dpll->m2;
  6460. }
  6461. static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
  6462. {
  6463. return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
  6464. }
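/*
 * Note: the two helpers above produce the FP0/FP1 divider values. On most
 * i9xx parts N, M1 and M2 occupy separate bit fields, while Pineview encodes
 * N as a one-hot value (1 << n) and carries no M1 field.
 */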
  6465. static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
  6466. struct intel_crtc_state *crtc_state,
  6467. struct dpll *reduced_clock)
  6468. {
  6469. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6470. u32 fp, fp2 = 0;
  6471. if (IS_PINEVIEW(dev_priv)) {
  6472. fp = pnv_dpll_compute_fp(&crtc_state->dpll);
  6473. if (reduced_clock)
  6474. fp2 = pnv_dpll_compute_fp(reduced_clock);
  6475. } else {
  6476. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  6477. if (reduced_clock)
  6478. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  6479. }
  6480. crtc_state->dpll_hw_state.fp0 = fp;
  6481. crtc->lowfreq_avail = false;
  6482. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6483. reduced_clock) {
  6484. crtc_state->dpll_hw_state.fp1 = fp2;
  6485. crtc->lowfreq_avail = true;
  6486. } else {
  6487. crtc_state->dpll_hw_state.fp1 = fp;
  6488. }
  6489. }
  6490. static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
  6491. pipe)
  6492. {
  6493. u32 reg_val;
  6494. /*
  6495. * PLLB opamp always calibrates to max value of 0x3f, force enable it
  6496. * and set it to a reasonable value instead.
  6497. */
  6498. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  6499. reg_val &= 0xffffff00;
  6500. reg_val |= 0x00000030;
  6501. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  6502. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
  6503. reg_val &= 0x8cffffff;
  6504. reg_val = 0x8c000000;
  6505. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  6506. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  6507. reg_val &= 0xffffff00;
  6508. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  6509. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
  6510. reg_val &= 0x00ffffff;
  6511. reg_val |= 0xb0000000;
  6512. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  6513. }
  6514. static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
  6515. struct intel_link_m_n *m_n)
  6516. {
  6517. struct drm_device *dev = crtc->base.dev;
  6518. struct drm_i915_private *dev_priv = to_i915(dev);
  6519. int pipe = crtc->pipe;
  6520. I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6521. I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
  6522. I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
  6523. I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  6524. }
  6525. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  6526. struct intel_link_m_n *m_n,
  6527. struct intel_link_m_n *m2_n2)
  6528. {
  6529. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6530. int pipe = crtc->pipe;
  6531. enum transcoder transcoder = crtc->config->cpu_transcoder;
  6532. if (INTEL_GEN(dev_priv) >= 5) {
  6533. I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6534. I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  6535. I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
  6536. I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
6537. /* M2_N2 registers are only available on gen < 8 (and CHV), so set
6538. * them only there and only if DRRS is supported, to avoid touching
6539. * the registers unnecessarily.
  6540. */
  6541. if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
  6542. INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
  6543. I915_WRITE(PIPE_DATA_M2(transcoder),
  6544. TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  6545. I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
  6546. I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
  6547. I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
  6548. }
  6549. } else {
  6550. I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6551. I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
  6552. I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
  6553. I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
  6554. }
  6555. }
  6556. void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
  6557. {
  6558. struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  6559. if (m_n == M1_N1) {
  6560. dp_m_n = &crtc->config->dp_m_n;
  6561. dp_m2_n2 = &crtc->config->dp_m2_n2;
  6562. } else if (m_n == M2_N2) {
  6563. /*
  6564. * M2_N2 registers are not supported. Hence m2_n2 divider value
  6565. * needs to be programmed into M1_N1.
  6566. */
  6567. dp_m_n = &crtc->config->dp_m2_n2;
  6568. } else {
  6569. DRM_ERROR("Unsupported divider value\n");
  6570. return;
  6571. }
  6572. if (crtc->config->has_pch_encoder)
  6573. intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
  6574. else
  6575. intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
  6576. }
  6577. static void vlv_compute_dpll(struct intel_crtc *crtc,
  6578. struct intel_crtc_state *pipe_config)
  6579. {
  6580. pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
  6581. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6582. if (crtc->pipe != PIPE_A)
  6583. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6584. /* DPLL not used with DSI, but still need the rest set up */
  6585. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6586. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
  6587. DPLL_EXT_BUFFER_ENABLE_VLV;
  6588. pipe_config->dpll_hw_state.dpll_md =
  6589. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6590. }
  6591. static void chv_compute_dpll(struct intel_crtc *crtc,
  6592. struct intel_crtc_state *pipe_config)
  6593. {
  6594. pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
  6595. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6596. if (crtc->pipe != PIPE_A)
  6597. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6598. /* DPLL not used with DSI, but still need the rest set up */
  6599. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6600. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
  6601. pipe_config->dpll_hw_state.dpll_md =
  6602. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6603. }
  6604. static void vlv_prepare_pll(struct intel_crtc *crtc,
  6605. const struct intel_crtc_state *pipe_config)
  6606. {
  6607. struct drm_device *dev = crtc->base.dev;
  6608. struct drm_i915_private *dev_priv = to_i915(dev);
  6609. enum pipe pipe = crtc->pipe;
  6610. u32 mdiv;
  6611. u32 bestn, bestm1, bestm2, bestp1, bestp2;
  6612. u32 coreclk, reg_val;
  6613. /* Enable Refclk */
  6614. I915_WRITE(DPLL(pipe),
  6615. pipe_config->dpll_hw_state.dpll &
  6616. ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
  6617. /* No need to actually set up the DPLL with DSI */
  6618. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6619. return;
  6620. mutex_lock(&dev_priv->sb_lock);
  6621. bestn = pipe_config->dpll.n;
  6622. bestm1 = pipe_config->dpll.m1;
  6623. bestm2 = pipe_config->dpll.m2;
  6624. bestp1 = pipe_config->dpll.p1;
  6625. bestp2 = pipe_config->dpll.p2;
  6626. /* See eDP HDMI DPIO driver vbios notes doc */
  6627. /* PLL B needs special handling */
  6628. if (pipe == PIPE_B)
  6629. vlv_pllb_recal_opamp(dev_priv, pipe);
  6630. /* Set up Tx target for periodic Rcomp update */
  6631. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  6632. /* Disable target IRef on PLL */
  6633. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
  6634. reg_val &= 0x00ffffff;
  6635. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  6636. /* Disable fast lock */
  6637. vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  6638. /* Set idtafcrecal before PLL is enabled */
  6639. mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
  6640. mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
  6641. mdiv |= ((bestn << DPIO_N_SHIFT));
  6642. mdiv |= (1 << DPIO_K_SHIFT);
  6643. /*
  6644. * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
  6645. * but we don't support that).
  6646. * Note: don't use the DAC post divider as it seems unstable.
  6647. */
  6648. mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
  6649. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6650. mdiv |= DPIO_ENABLE_CALIBRATION;
  6651. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6652. /* Set HBR and RBR LPF coefficients */
  6653. if (pipe_config->port_clock == 162000 ||
  6654. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
  6655. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
  6656. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6657. 0x009f0003);
  6658. else
  6659. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6660. 0x00d0000f);
  6661. if (intel_crtc_has_dp_encoder(pipe_config)) {
  6662. /* Use SSC source */
  6663. if (pipe == PIPE_A)
  6664. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6665. 0x0df40000);
  6666. else
  6667. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6668. 0x0df70000);
  6669. } else { /* HDMI or VGA */
  6670. /* Use bend source */
  6671. if (pipe == PIPE_A)
  6672. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6673. 0x0df70000);
  6674. else
  6675. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6676. 0x0df40000);
  6677. }
  6678. coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
  6679. coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  6680. if (intel_crtc_has_dp_encoder(crtc->config))
  6681. coreclk |= 0x01000000;
  6682. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  6683. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  6684. mutex_unlock(&dev_priv->sb_lock);
  6685. }
  6686. static void chv_prepare_pll(struct intel_crtc *crtc,
  6687. const struct intel_crtc_state *pipe_config)
  6688. {
  6689. struct drm_device *dev = crtc->base.dev;
  6690. struct drm_i915_private *dev_priv = to_i915(dev);
  6691. enum pipe pipe = crtc->pipe;
  6692. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  6693. u32 loopfilter, tribuf_calcntr;
  6694. u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
  6695. u32 dpio_val;
  6696. int vco;
  6697. /* Enable Refclk and SSC */
  6698. I915_WRITE(DPLL(pipe),
  6699. pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
  6700. /* No need to actually set up the DPLL with DSI */
  6701. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6702. return;
  6703. bestn = pipe_config->dpll.n;
  6704. bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
  6705. bestm1 = pipe_config->dpll.m1;
  6706. bestm2 = pipe_config->dpll.m2 >> 22;
  6707. bestp1 = pipe_config->dpll.p1;
  6708. bestp2 = pipe_config->dpll.p2;
  6709. vco = pipe_config->dpll.vco;
  6710. dpio_val = 0;
  6711. loopfilter = 0;
  6712. mutex_lock(&dev_priv->sb_lock);
  6713. /* p1 and p2 divider */
  6714. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
  6715. 5 << DPIO_CHV_S1_DIV_SHIFT |
  6716. bestp1 << DPIO_CHV_P1_DIV_SHIFT |
  6717. bestp2 << DPIO_CHV_P2_DIV_SHIFT |
  6718. 1 << DPIO_CHV_K_DIV_SHIFT);
  6719. /* Feedback post-divider - m2 */
  6720. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
  6721. /* Feedback refclk divider - n and m1 */
  6722. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
  6723. DPIO_CHV_M1_DIV_BY_2 |
  6724. 1 << DPIO_CHV_N_DIV_SHIFT);
  6725. /* M2 fraction division */
  6726. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
  6727. /* M2 fraction division enable */
  6728. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  6729. dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
  6730. dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
  6731. if (bestm2_frac)
  6732. dpio_val |= DPIO_CHV_FRAC_DIV_EN;
  6733. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
  6734. /* Program digital lock detect threshold */
  6735. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
  6736. dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
  6737. DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
  6738. dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
  6739. if (!bestm2_frac)
  6740. dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
  6741. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
  6742. /* Loop filter */
  6743. if (vco == 5400000) {
  6744. loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
  6745. loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
  6746. loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6747. tribuf_calcntr = 0x9;
  6748. } else if (vco <= 6200000) {
  6749. loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
  6750. loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
  6751. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6752. tribuf_calcntr = 0x9;
  6753. } else if (vco <= 6480000) {
  6754. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6755. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6756. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6757. tribuf_calcntr = 0x8;
  6758. } else {
  6759. /* Not supported. Apply the same limits as in the max case */
  6760. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6761. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6762. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6763. tribuf_calcntr = 0;
  6764. }
  6765. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
  6766. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
  6767. dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
  6768. dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
  6769. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
  6770. /* AFC Recal */
  6771. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
  6772. vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
  6773. DPIO_AFC_RECAL);
  6774. mutex_unlock(&dev_priv->sb_lock);
  6775. }
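/*
 * The loop-filter selection above is keyed off the VCO frequency in kHz:
 * a 5.4 GHz VCO gets prop/int/gain = 0x3/0x8/0x1 with tribuf_calcntr 0x9,
 * anything up to 6.2 GHz gets 0x5/0xB/0x3 (tribuf 0x9), up to 6.48 GHz
 * gets 0x4/0x9/0x3 (tribuf 0x8), and higher (unsupported) VCOs reuse the
 * last set with the tri-buffer count forced to 0. For example,
 * vco == 6200000 lands in the second band.
 */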
  6776. /**
  6777. * vlv_force_pll_on - forcibly enable just the PLL
  6778. * @dev_priv: i915 private structure
  6779. * @pipe: pipe PLL to enable
  6780. * @dpll: PLL configuration
  6781. *
  6782. * Enable the PLL for @pipe using the supplied @dpll config. To be used
  6783. * in cases where we need the PLL enabled even when @pipe is not going to
  6784. * be enabled.
  6785. */
  6786. int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
  6787. const struct dpll *dpll)
  6788. {
  6789. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  6790. struct intel_crtc_state *pipe_config;
  6791. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  6792. if (!pipe_config)
  6793. return -ENOMEM;
  6794. pipe_config->base.crtc = &crtc->base;
  6795. pipe_config->pixel_multiplier = 1;
  6796. pipe_config->dpll = *dpll;
  6797. if (IS_CHERRYVIEW(dev_priv)) {
  6798. chv_compute_dpll(crtc, pipe_config);
  6799. chv_prepare_pll(crtc, pipe_config);
  6800. chv_enable_pll(crtc, pipe_config);
  6801. } else {
  6802. vlv_compute_dpll(crtc, pipe_config);
  6803. vlv_prepare_pll(crtc, pipe_config);
  6804. vlv_enable_pll(crtc, pipe_config);
  6805. }
  6806. kfree(pipe_config);
  6807. return 0;
  6808. }
  6809. /**
  6810. * vlv_force_pll_off - forcibly disable just the PLL
  6811. * @dev_priv: i915 private structure
  6812. * @pipe: pipe PLL to disable
  6813. *
  6814. * Disable the PLL for @pipe. To be used to undo a previous
  6815. * vlv_force_pll_on(), i.e. when the PLL was enabled without enabling @pipe.
  6816. */
  6817. void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
  6818. {
  6819. if (IS_CHERRYVIEW(dev_priv))
  6820. chv_disable_pll(dev_priv, pipe);
  6821. else
  6822. vlv_disable_pll(dev_priv, pipe);
  6823. }
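/*
 * Typical pairing of the two helpers above (illustrative sketch only; the
 * divider values are placeholders, not taken from a real caller):
 *
 *	static const struct dpll fixed_dpll = { .m1 = 2, .m2 = 0, .n = 1,
 *						.p1 = 2, .p2 = 20 };
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &fixed_dpll) == 0) {
 *		... touch hardware that needs the pipe A PLL running ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}
 */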
  6824. static void i9xx_compute_dpll(struct intel_crtc *crtc,
  6825. struct intel_crtc_state *crtc_state,
  6826. struct dpll *reduced_clock)
  6827. {
  6828. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6829. u32 dpll;
  6830. struct dpll *clock = &crtc_state->dpll;
  6831. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6832. dpll = DPLL_VGA_MODE_DIS;
  6833. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
  6834. dpll |= DPLLB_MODE_LVDS;
  6835. else
  6836. dpll |= DPLLB_MODE_DAC_SERIAL;
  6837. if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
  6838. dpll |= (crtc_state->pixel_multiplier - 1)
  6839. << SDVO_MULTIPLIER_SHIFT_HIRES;
  6840. }
  6841. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
  6842. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
  6843. dpll |= DPLL_SDVO_HIGH_SPEED;
  6844. if (intel_crtc_has_dp_encoder(crtc_state))
  6845. dpll |= DPLL_SDVO_HIGH_SPEED;
  6846. /* compute bitmask from p1 value */
  6847. if (IS_PINEVIEW(dev_priv))
  6848. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
  6849. else {
  6850. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6851. if (IS_G4X(dev_priv) && reduced_clock)
  6852. dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  6853. }
  6854. switch (clock->p2) {
  6855. case 5:
  6856. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  6857. break;
  6858. case 7:
  6859. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  6860. break;
  6861. case 10:
  6862. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  6863. break;
  6864. case 14:
  6865. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  6866. break;
  6867. }
  6868. if (INTEL_GEN(dev_priv) >= 4)
  6869. dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
  6870. if (crtc_state->sdvo_tv_clock)
  6871. dpll |= PLL_REF_INPUT_TVCLKINBC;
  6872. else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6873. intel_panel_use_ssc(dev_priv))
  6874. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6875. else
  6876. dpll |= PLL_REF_INPUT_DREFCLK;
  6877. dpll |= DPLL_VCO_ENABLE;
  6878. crtc_state->dpll_hw_state.dpll = dpll;
  6879. if (INTEL_GEN(dev_priv) >= 4) {
  6880. u32 dpll_md = (crtc_state->pixel_multiplier - 1)
  6881. << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6882. crtc_state->dpll_hw_state.dpll_md = dpll_md;
  6883. }
  6884. }
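/*
 * The P1 post divider above is encoded as a one-hot bitmask rather than a
 * binary value: for example clock->p1 == 3 sets bit (1 << 2) in the
 * DPLL_FPA01_P1_POST_DIV field, and the reduced clock (when present on
 * G4X) gets the same encoding in the FPA1 field.
 */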
  6885. static void i8xx_compute_dpll(struct intel_crtc *crtc,
  6886. struct intel_crtc_state *crtc_state,
  6887. struct dpll *reduced_clock)
  6888. {
  6889. struct drm_device *dev = crtc->base.dev;
  6890. struct drm_i915_private *dev_priv = to_i915(dev);
  6891. u32 dpll;
  6892. struct dpll *clock = &crtc_state->dpll;
  6893. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6894. dpll = DPLL_VGA_MODE_DIS;
  6895. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6896. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6897. } else {
  6898. if (clock->p1 == 2)
  6899. dpll |= PLL_P1_DIVIDE_BY_TWO;
  6900. else
  6901. dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6902. if (clock->p2 == 4)
  6903. dpll |= PLL_P2_DIVIDE_BY_4;
  6904. }
  6905. if (!IS_I830(dev_priv) &&
  6906. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
  6907. dpll |= DPLL_DVO_2X_MODE;
  6908. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6909. intel_panel_use_ssc(dev_priv))
  6910. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6911. else
  6912. dpll |= PLL_REF_INPUT_DREFCLK;
  6913. dpll |= DPLL_VCO_ENABLE;
  6914. crtc_state->dpll_hw_state.dpll = dpll;
  6915. }
  6916. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
  6917. {
  6918. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  6919. enum pipe pipe = intel_crtc->pipe;
  6920. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  6921. const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
  6922. uint32_t crtc_vtotal, crtc_vblank_end;
  6923. int vsyncshift = 0;
  6924. /* We need to be careful not to change the adjusted mode, for otherwise
  6925. * the hw state checker will get angry at the mismatch. */
  6926. crtc_vtotal = adjusted_mode->crtc_vtotal;
  6927. crtc_vblank_end = adjusted_mode->crtc_vblank_end;
  6928. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
  6929. /* the chip adds 2 halflines automatically */
  6930. crtc_vtotal -= 1;
  6931. crtc_vblank_end -= 1;
  6932. if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  6933. vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
  6934. else
  6935. vsyncshift = adjusted_mode->crtc_hsync_start -
  6936. adjusted_mode->crtc_htotal / 2;
  6937. if (vsyncshift < 0)
  6938. vsyncshift += adjusted_mode->crtc_htotal;
  6939. }
  6940. if (INTEL_GEN(dev_priv) > 3)
  6941. I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
  6942. I915_WRITE(HTOTAL(cpu_transcoder),
  6943. (adjusted_mode->crtc_hdisplay - 1) |
  6944. ((adjusted_mode->crtc_htotal - 1) << 16));
  6945. I915_WRITE(HBLANK(cpu_transcoder),
  6946. (adjusted_mode->crtc_hblank_start - 1) |
  6947. ((adjusted_mode->crtc_hblank_end - 1) << 16));
  6948. I915_WRITE(HSYNC(cpu_transcoder),
  6949. (adjusted_mode->crtc_hsync_start - 1) |
  6950. ((adjusted_mode->crtc_hsync_end - 1) << 16));
  6951. I915_WRITE(VTOTAL(cpu_transcoder),
  6952. (adjusted_mode->crtc_vdisplay - 1) |
  6953. ((crtc_vtotal - 1) << 16));
  6954. I915_WRITE(VBLANK(cpu_transcoder),
  6955. (adjusted_mode->crtc_vblank_start - 1) |
  6956. ((crtc_vblank_end - 1) << 16));
  6957. I915_WRITE(VSYNC(cpu_transcoder),
  6958. (adjusted_mode->crtc_vsync_start - 1) |
  6959. ((adjusted_mode->crtc_vsync_end - 1) << 16));
  6960. /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
  6961. * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
  6962. * documented in the DDI_FUNC_CTL register description, EDP Input Select
  6963. * bits. */
  6964. if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
  6965. (pipe == PIPE_B || pipe == PIPE_C))
  6966. I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
  6967. }
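/*
 * Each timing register packs the start/active value minus one in the low
 * 16 bits and the end/total value minus one in the high 16 bits. Worked
 * example with assumed CEA 1920x1080@60 timings (hdisplay 1920,
 * htotal 2200): HTOTAL is written as (1920 - 1) | ((2200 - 1) << 16),
 * i.e. 0x0897077f.
 */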
  6968. static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
  6969. {
  6970. struct drm_device *dev = intel_crtc->base.dev;
  6971. struct drm_i915_private *dev_priv = to_i915(dev);
  6972. enum pipe pipe = intel_crtc->pipe;
  6973. /* pipesrc controls the size that is scaled from, which should
  6974. * always be the user's requested size.
  6975. */
  6976. I915_WRITE(PIPESRC(pipe),
  6977. ((intel_crtc->config->pipe_src_w - 1) << 16) |
  6978. (intel_crtc->config->pipe_src_h - 1));
  6979. }
  6980. static void intel_get_pipe_timings(struct intel_crtc *crtc,
  6981. struct intel_crtc_state *pipe_config)
  6982. {
  6983. struct drm_device *dev = crtc->base.dev;
  6984. struct drm_i915_private *dev_priv = to_i915(dev);
  6985. enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
  6986. uint32_t tmp;
  6987. tmp = I915_READ(HTOTAL(cpu_transcoder));
  6988. pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
  6989. pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
  6990. tmp = I915_READ(HBLANK(cpu_transcoder));
  6991. pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
  6992. pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
  6993. tmp = I915_READ(HSYNC(cpu_transcoder));
  6994. pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
  6995. pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
  6996. tmp = I915_READ(VTOTAL(cpu_transcoder));
  6997. pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
  6998. pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
  6999. tmp = I915_READ(VBLANK(cpu_transcoder));
  7000. pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
  7001. pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
  7002. tmp = I915_READ(VSYNC(cpu_transcoder));
  7003. pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
  7004. pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
  7005. if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
  7006. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
  7007. pipe_config->base.adjusted_mode.crtc_vtotal += 1;
  7008. pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
  7009. }
  7010. }
  7011. static void intel_get_pipe_src_size(struct intel_crtc *crtc,
  7012. struct intel_crtc_state *pipe_config)
  7013. {
  7014. struct drm_device *dev = crtc->base.dev;
  7015. struct drm_i915_private *dev_priv = to_i915(dev);
  7016. u32 tmp;
  7017. tmp = I915_READ(PIPESRC(crtc->pipe));
  7018. pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
  7019. pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
  7020. pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
  7021. pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
  7022. }
  7023. void intel_mode_from_pipe_config(struct drm_display_mode *mode,
  7024. struct intel_crtc_state *pipe_config)
  7025. {
  7026. mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
  7027. mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
  7028. mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
  7029. mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
  7030. mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
  7031. mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
  7032. mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
  7033. mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
  7034. mode->flags = pipe_config->base.adjusted_mode.flags;
  7035. mode->type = DRM_MODE_TYPE_DRIVER;
  7036. mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
  7037. mode->flags |= pipe_config->base.adjusted_mode.flags;
  7038. mode->hsync = drm_mode_hsync(mode);
  7039. mode->vrefresh = drm_mode_vrefresh(mode);
  7040. drm_mode_set_name(mode);
  7041. }
  7042. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
  7043. {
  7044. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  7045. uint32_t pipeconf;
  7046. pipeconf = 0;
  7047. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  7048. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  7049. pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
  7050. if (intel_crtc->config->double_wide)
  7051. pipeconf |= PIPECONF_DOUBLE_WIDE;
  7052. /* only g4x and later have fancy bpc/dither controls */
  7053. if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  7054. IS_CHERRYVIEW(dev_priv)) {
  7055. /* Bspec claims that we can't use dithering for 30bpp pipes. */
  7056. if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
  7057. pipeconf |= PIPECONF_DITHER_EN |
  7058. PIPECONF_DITHER_TYPE_SP;
  7059. switch (intel_crtc->config->pipe_bpp) {
  7060. case 18:
  7061. pipeconf |= PIPECONF_6BPC;
  7062. break;
  7063. case 24:
  7064. pipeconf |= PIPECONF_8BPC;
  7065. break;
  7066. case 30:
  7067. pipeconf |= PIPECONF_10BPC;
  7068. break;
  7069. default:
  7070. /* Case prevented by intel_choose_pipe_bpp_dither. */
  7071. BUG();
  7072. }
  7073. }
  7074. if (HAS_PIPE_CXSR(dev_priv)) {
  7075. if (intel_crtc->lowfreq_avail) {
  7076. DRM_DEBUG_KMS("enabling CxSR downclocking\n");
  7077. pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
  7078. } else {
  7079. DRM_DEBUG_KMS("disabling CxSR downclocking\n");
  7080. }
  7081. }
  7082. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
  7083. if (INTEL_GEN(dev_priv) < 4 ||
  7084. intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
  7085. pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
  7086. else
  7087. pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
  7088. } else
  7089. pipeconf |= PIPECONF_PROGRESSIVE;
  7090. if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
  7091. intel_crtc->config->limited_color_range)
  7092. pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
  7093. I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
  7094. POSTING_READ(PIPECONF(intel_crtc->pipe));
  7095. }
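/*
 * Note that pipe_bpp above counts bits over all three channels, so the
 * 18/24/30 cases map to the 6, 8 and 10 bits-per-component PIPECONF
 * encodings respectively.
 */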
  7096. static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  7097. struct intel_crtc_state *crtc_state)
  7098. {
  7099. struct drm_device *dev = crtc->base.dev;
  7100. struct drm_i915_private *dev_priv = to_i915(dev);
  7101. const struct intel_limit *limit;
  7102. int refclk = 48000;
  7103. memset(&crtc_state->dpll_hw_state, 0,
  7104. sizeof(crtc_state->dpll_hw_state));
  7105. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7106. if (intel_panel_use_ssc(dev_priv)) {
  7107. refclk = dev_priv->vbt.lvds_ssc_freq;
  7108. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7109. }
  7110. limit = &intel_limits_i8xx_lvds;
  7111. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
  7112. limit = &intel_limits_i8xx_dvo;
  7113. } else {
  7114. limit = &intel_limits_i8xx_dac;
  7115. }
  7116. if (!crtc_state->clock_set &&
  7117. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7118. refclk, NULL, &crtc_state->dpll)) {
  7119. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7120. return -EINVAL;
  7121. }
  7122. i8xx_compute_dpll(crtc, crtc_state, NULL);
  7123. return 0;
  7124. }
  7125. static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
  7126. struct intel_crtc_state *crtc_state)
  7127. {
  7128. struct drm_device *dev = crtc->base.dev;
  7129. struct drm_i915_private *dev_priv = to_i915(dev);
  7130. const struct intel_limit *limit;
  7131. int refclk = 96000;
  7132. memset(&crtc_state->dpll_hw_state, 0,
  7133. sizeof(crtc_state->dpll_hw_state));
  7134. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7135. if (intel_panel_use_ssc(dev_priv)) {
  7136. refclk = dev_priv->vbt.lvds_ssc_freq;
  7137. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7138. }
  7139. if (intel_is_dual_link_lvds(dev))
  7140. limit = &intel_limits_g4x_dual_channel_lvds;
  7141. else
  7142. limit = &intel_limits_g4x_single_channel_lvds;
  7143. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
  7144. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
  7145. limit = &intel_limits_g4x_hdmi;
  7146. } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
  7147. limit = &intel_limits_g4x_sdvo;
  7148. } else {
  7149. /* Fall back to the i9xx SDVO limits for any other output type */
  7150. limit = &intel_limits_i9xx_sdvo;
  7151. }
  7152. if (!crtc_state->clock_set &&
  7153. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7154. refclk, NULL, &crtc_state->dpll)) {
  7155. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7156. return -EINVAL;
  7157. }
  7158. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7159. return 0;
  7160. }
  7161. static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
  7162. struct intel_crtc_state *crtc_state)
  7163. {
  7164. struct drm_device *dev = crtc->base.dev;
  7165. struct drm_i915_private *dev_priv = to_i915(dev);
  7166. const struct intel_limit *limit;
  7167. int refclk = 96000;
  7168. memset(&crtc_state->dpll_hw_state, 0,
  7169. sizeof(crtc_state->dpll_hw_state));
  7170. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7171. if (intel_panel_use_ssc(dev_priv)) {
  7172. refclk = dev_priv->vbt.lvds_ssc_freq;
  7173. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7174. }
  7175. limit = &intel_limits_pineview_lvds;
  7176. } else {
  7177. limit = &intel_limits_pineview_sdvo;
  7178. }
  7179. if (!crtc_state->clock_set &&
  7180. !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7181. refclk, NULL, &crtc_state->dpll)) {
  7182. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7183. return -EINVAL;
  7184. }
  7185. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7186. return 0;
  7187. }
  7188. static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
  7189. struct intel_crtc_state *crtc_state)
  7190. {
  7191. struct drm_device *dev = crtc->base.dev;
  7192. struct drm_i915_private *dev_priv = to_i915(dev);
  7193. const struct intel_limit *limit;
  7194. int refclk = 96000;
  7195. memset(&crtc_state->dpll_hw_state, 0,
  7196. sizeof(crtc_state->dpll_hw_state));
  7197. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7198. if (intel_panel_use_ssc(dev_priv)) {
  7199. refclk = dev_priv->vbt.lvds_ssc_freq;
  7200. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  7201. }
  7202. limit = &intel_limits_i9xx_lvds;
  7203. } else {
  7204. limit = &intel_limits_i9xx_sdvo;
  7205. }
  7206. if (!crtc_state->clock_set &&
  7207. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7208. refclk, NULL, &crtc_state->dpll)) {
  7209. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7210. return -EINVAL;
  7211. }
  7212. i9xx_compute_dpll(crtc, crtc_state, NULL);
  7213. return 0;
  7214. }
  7215. static int chv_crtc_compute_clock(struct intel_crtc *crtc,
  7216. struct intel_crtc_state *crtc_state)
  7217. {
  7218. int refclk = 100000;
  7219. const struct intel_limit *limit = &intel_limits_chv;
  7220. memset(&crtc_state->dpll_hw_state, 0,
  7221. sizeof(crtc_state->dpll_hw_state));
  7222. if (!crtc_state->clock_set &&
  7223. !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7224. refclk, NULL, &crtc_state->dpll)) {
  7225. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7226. return -EINVAL;
  7227. }
  7228. chv_compute_dpll(crtc, crtc_state);
  7229. return 0;
  7230. }
  7231. static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
  7232. struct intel_crtc_state *crtc_state)
  7233. {
  7234. int refclk = 100000;
  7235. const struct intel_limit *limit = &intel_limits_vlv;
  7236. memset(&crtc_state->dpll_hw_state, 0,
  7237. sizeof(crtc_state->dpll_hw_state));
  7238. if (!crtc_state->clock_set &&
  7239. !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7240. refclk, NULL, &crtc_state->dpll)) {
  7241. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7242. return -EINVAL;
  7243. }
  7244. vlv_compute_dpll(crtc, crtc_state);
  7245. return 0;
  7246. }
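/*
 * The *_crtc_compute_clock() variants above differ mainly in their
 * reference clock and limit tables: 48 MHz for i8xx, 96 MHz for G4X,
 * Pineview and i9xx, and a fixed 100 MHz for VLV/CHV; the legacy LVDS
 * paths substitute the VBT SSC frequency when the panel uses SSC. Each
 * variant returns -EINVAL when the divider search cannot satisfy the
 * requested port clock.
 */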
  7247. static void i9xx_get_pfit_config(struct intel_crtc *crtc,
  7248. struct intel_crtc_state *pipe_config)
  7249. {
  7250. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  7251. uint32_t tmp;
  7252. if (INTEL_GEN(dev_priv) <= 3 &&
  7253. (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
  7254. return;
  7255. tmp = I915_READ(PFIT_CONTROL);
  7256. if (!(tmp & PFIT_ENABLE))
  7257. return;
  7258. /* Check whether the pfit is attached to our pipe. */
  7259. if (INTEL_GEN(dev_priv) < 4) {
  7260. if (crtc->pipe != PIPE_B)
  7261. return;
  7262. } else {
  7263. if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
  7264. return;
  7265. }
  7266. pipe_config->gmch_pfit.control = tmp;
  7267. pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
  7268. }
  7269. static void vlv_crtc_clock_get(struct intel_crtc *crtc,
  7270. struct intel_crtc_state *pipe_config)
  7271. {
  7272. struct drm_device *dev = crtc->base.dev;
  7273. struct drm_i915_private *dev_priv = to_i915(dev);
  7274. int pipe = pipe_config->cpu_transcoder;
  7275. struct dpll clock;
  7276. u32 mdiv;
  7277. int refclk = 100000;
  7278. /* In case of DSI, DPLL will not be used */
  7279. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  7280. return;
  7281. mutex_lock(&dev_priv->sb_lock);
  7282. mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
  7283. mutex_unlock(&dev_priv->sb_lock);
  7284. clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
  7285. clock.m2 = mdiv & DPIO_M2DIV_MASK;
  7286. clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
  7287. clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
  7288. clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
  7289. pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
  7290. }
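/*
 * The single mdiv word read back above carries the whole divider state
 * for the pipe: m1/m2 form the feedback divider, n the reference divider
 * and p1/p2 the post dividers. vlv_calc_dpll_params() combines those with
 * the 100 MHz reference to reconstruct port_clock.
 */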
  7291. static void
  7292. i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  7293. struct intel_initial_plane_config *plane_config)
  7294. {
  7295. struct drm_device *dev = crtc->base.dev;
  7296. struct drm_i915_private *dev_priv = to_i915(dev);
  7297. u32 val, base, offset;
  7298. int pipe = crtc->pipe, plane = crtc->plane;
  7299. int fourcc, pixel_format;
  7300. unsigned int aligned_height;
  7301. struct drm_framebuffer *fb;
  7302. struct intel_framebuffer *intel_fb;
  7303. val = I915_READ(DSPCNTR(plane));
  7304. if (!(val & DISPLAY_PLANE_ENABLE))
  7305. return;
  7306. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  7307. if (!intel_fb) {
  7308. DRM_DEBUG_KMS("failed to alloc fb\n");
  7309. return;
  7310. }
  7311. fb = &intel_fb->base;
  7312. if (INTEL_GEN(dev_priv) >= 4) {
  7313. if (val & DISPPLANE_TILED) {
  7314. plane_config->tiling = I915_TILING_X;
  7315. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  7316. }
  7317. }
  7318. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  7319. fourcc = i9xx_format_to_fourcc(pixel_format);
  7320. fb->pixel_format = fourcc;
  7321. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  7322. if (INTEL_GEN(dev_priv) >= 4) {
  7323. if (plane_config->tiling)
  7324. offset = I915_READ(DSPTILEOFF(plane));
  7325. else
  7326. offset = I915_READ(DSPLINOFF(plane));
  7327. base = I915_READ(DSPSURF(plane)) & 0xfffff000;
  7328. } else {
  7329. base = I915_READ(DSPADDR(plane));
  7330. }
  7331. plane_config->base = base;
  7332. val = I915_READ(PIPESRC(pipe));
  7333. fb->width = ((val >> 16) & 0xfff) + 1;
  7334. fb->height = ((val >> 0) & 0xfff) + 1;
  7335. val = I915_READ(DSPSTRIDE(pipe));
  7336. fb->pitches[0] = val & 0xffffffc0;
  7337. aligned_height = intel_fb_align_height(dev, fb->height,
  7338. fb->pixel_format,
  7339. fb->modifier[0]);
  7340. plane_config->size = fb->pitches[0] * aligned_height;
  7341. DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  7342. pipe_name(pipe), plane, fb->width, fb->height,
  7343. fb->bits_per_pixel, base, fb->pitches[0],
  7344. plane_config->size);
  7345. plane_config->fb = intel_fb;
  7346. }
  7347. static void chv_crtc_clock_get(struct intel_crtc *crtc,
  7348. struct intel_crtc_state *pipe_config)
  7349. {
  7350. struct drm_device *dev = crtc->base.dev;
  7351. struct drm_i915_private *dev_priv = to_i915(dev);
  7352. int pipe = pipe_config->cpu_transcoder;
  7353. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  7354. struct dpll clock;
  7355. u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
  7356. int refclk = 100000;
  7357. /* In case of DSI, DPLL will not be used */
  7358. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  7359. return;
  7360. mutex_lock(&dev_priv->sb_lock);
  7361. cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
  7362. pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
  7363. pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
  7364. pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
  7365. pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  7366. mutex_unlock(&dev_priv->sb_lock);
  7367. clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
  7368. clock.m2 = (pll_dw0 & 0xff) << 22;
  7369. if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
  7370. clock.m2 |= pll_dw2 & 0x3fffff;
  7371. clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
  7372. clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
  7373. clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
  7374. pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
  7375. }
  7376. static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  7377. struct intel_crtc_state *pipe_config)
  7378. {
  7379. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  7380. enum intel_display_power_domain power_domain;
  7381. uint32_t tmp;
  7382. bool ret;
  7383. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  7384. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  7385. return false;
  7386. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  7387. pipe_config->shared_dpll = NULL;
  7388. ret = false;
  7389. tmp = I915_READ(PIPECONF(crtc->pipe));
  7390. if (!(tmp & PIPECONF_ENABLE))
  7391. goto out;
  7392. if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  7393. IS_CHERRYVIEW(dev_priv)) {
  7394. switch (tmp & PIPECONF_BPC_MASK) {
  7395. case PIPECONF_6BPC:
  7396. pipe_config->pipe_bpp = 18;
  7397. break;
  7398. case PIPECONF_8BPC:
  7399. pipe_config->pipe_bpp = 24;
  7400. break;
  7401. case PIPECONF_10BPC:
  7402. pipe_config->pipe_bpp = 30;
  7403. break;
  7404. default:
  7405. break;
  7406. }
  7407. }
  7408. if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
  7409. (tmp & PIPECONF_COLOR_RANGE_SELECT))
  7410. pipe_config->limited_color_range = true;
  7411. if (INTEL_GEN(dev_priv) < 4)
  7412. pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
  7413. intel_get_pipe_timings(crtc, pipe_config);
  7414. intel_get_pipe_src_size(crtc, pipe_config);
  7415. i9xx_get_pfit_config(crtc, pipe_config);
  7416. if (INTEL_GEN(dev_priv) >= 4) {
  7417. /* No way to read it out on pipes B and C */
  7418. if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
  7419. tmp = dev_priv->chv_dpll_md[crtc->pipe];
  7420. else
  7421. tmp = I915_READ(DPLL_MD(crtc->pipe));
  7422. pipe_config->pixel_multiplier =
  7423. ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
  7424. >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
  7425. pipe_config->dpll_hw_state.dpll_md = tmp;
  7426. } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
  7427. IS_G33(dev_priv)) {
  7428. tmp = I915_READ(DPLL(crtc->pipe));
  7429. pipe_config->pixel_multiplier =
  7430. ((tmp & SDVO_MULTIPLIER_MASK)
  7431. >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
  7432. } else {
  7433. /* Note that on i915G/GM the pixel multiplier is in the sdvo
  7434. * port and will be fixed up in the encoder->get_config
  7435. * function. */
  7436. pipe_config->pixel_multiplier = 1;
  7437. }
  7438. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
  7439. if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
  7440. /*
  7441. * DPLL_DVO_2X_MODE must be enabled for both DPLLs
  7442. * on 830. Filter it out here so that we don't
  7443. * report errors due to that.
  7444. */
  7445. if (IS_I830(dev_priv))
  7446. pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
  7447. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
  7448. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
  7449. } else {
  7450. /* Mask out read-only status bits. */
  7451. pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
  7452. DPLL_PORTC_READY_MASK |
  7453. DPLL_PORTB_READY_MASK);
  7454. }
  7455. if (IS_CHERRYVIEW(dev_priv))
  7456. chv_crtc_clock_get(crtc, pipe_config);
  7457. else if (IS_VALLEYVIEW(dev_priv))
  7458. vlv_crtc_clock_get(crtc, pipe_config);
  7459. else
  7460. i9xx_crtc_clock_get(crtc, pipe_config);
  7461. /*
  7462. * Normally the dotclock is filled in by the encoder .get_config()
  7463. * but in case the pipe is enabled w/o any ports we need a sane
  7464. * default.
  7465. */
  7466. pipe_config->base.adjusted_mode.crtc_clock =
  7467. pipe_config->port_clock / pipe_config->pixel_multiplier;
  7468. ret = true;
  7469. out:
  7470. intel_display_power_put(dev_priv, power_domain);
  7471. return ret;
  7472. }
  7473. static void ironlake_init_pch_refclk(struct drm_device *dev)
  7474. {
  7475. struct drm_i915_private *dev_priv = to_i915(dev);
  7476. struct intel_encoder *encoder;
  7477. int i;
  7478. u32 val, final;
  7479. bool has_lvds = false;
  7480. bool has_cpu_edp = false;
  7481. bool has_panel = false;
  7482. bool has_ck505 = false;
  7483. bool can_ssc = false;
  7484. bool using_ssc_source = false;
  7485. /* We need to take the global config into account */
  7486. for_each_intel_encoder(dev, encoder) {
  7487. switch (encoder->type) {
  7488. case INTEL_OUTPUT_LVDS:
  7489. has_panel = true;
  7490. has_lvds = true;
  7491. break;
  7492. case INTEL_OUTPUT_EDP:
  7493. has_panel = true;
  7494. if (enc_to_dig_port(&encoder->base)->port == PORT_A)
  7495. has_cpu_edp = true;
  7496. break;
  7497. default:
  7498. break;
  7499. }
  7500. }
  7501. if (HAS_PCH_IBX(dev_priv)) {
  7502. has_ck505 = dev_priv->vbt.display_clock_mode;
  7503. can_ssc = has_ck505;
  7504. } else {
  7505. has_ck505 = false;
  7506. can_ssc = true;
  7507. }
  7508. /* Check if any DPLLs are using the SSC source */
  7509. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  7510. u32 temp = I915_READ(PCH_DPLL(i));
  7511. if (!(temp & DPLL_VCO_ENABLE))
  7512. continue;
  7513. if ((temp & PLL_REF_INPUT_MASK) ==
  7514. PLLB_REF_INPUT_SPREADSPECTRUMIN) {
  7515. using_ssc_source = true;
  7516. break;
  7517. }
  7518. }
  7519. DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
  7520. has_panel, has_lvds, has_ck505, using_ssc_source);
  7521. /* Ironlake: try to set up the display reference clock before enabling
  7522. * the DPLL. This is only under the driver's control after PCH B
  7523. * stepping; earlier chipset steppings ignore
  7524. * this setting.
  7525. */
  7526. val = I915_READ(PCH_DREF_CONTROL);
  7527. /* As we must carefully and slowly disable/enable each source in turn,
  7528. * compute the final state we want first and check if we need to
  7529. * make any changes at all.
  7530. */
  7531. final = val;
  7532. final &= ~DREF_NONSPREAD_SOURCE_MASK;
  7533. if (has_ck505)
  7534. final |= DREF_NONSPREAD_CK505_ENABLE;
  7535. else
  7536. final |= DREF_NONSPREAD_SOURCE_ENABLE;
  7537. final &= ~DREF_SSC_SOURCE_MASK;
  7538. final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7539. final &= ~DREF_SSC1_ENABLE;
  7540. if (has_panel) {
  7541. final |= DREF_SSC_SOURCE_ENABLE;
  7542. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7543. final |= DREF_SSC1_ENABLE;
  7544. if (has_cpu_edp) {
  7545. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7546. final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7547. else
  7548. final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7549. } else
  7550. final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7551. } else if (using_ssc_source) {
  7552. final |= DREF_SSC_SOURCE_ENABLE;
  7553. final |= DREF_SSC1_ENABLE;
  7554. }
  7555. if (final == val)
  7556. return;
  7557. /* Always enable nonspread source */
  7558. val &= ~DREF_NONSPREAD_SOURCE_MASK;
  7559. if (has_ck505)
  7560. val |= DREF_NONSPREAD_CK505_ENABLE;
  7561. else
  7562. val |= DREF_NONSPREAD_SOURCE_ENABLE;
  7563. if (has_panel) {
  7564. val &= ~DREF_SSC_SOURCE_MASK;
  7565. val |= DREF_SSC_SOURCE_ENABLE;
  7566. /* SSC must be turned on before enabling the CPU output */
  7567. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7568. DRM_DEBUG_KMS("Using SSC on panel\n");
  7569. val |= DREF_SSC1_ENABLE;
  7570. } else
  7571. val &= ~DREF_SSC1_ENABLE;
  7572. /* Get SSC going before enabling the outputs */
  7573. I915_WRITE(PCH_DREF_CONTROL, val);
  7574. POSTING_READ(PCH_DREF_CONTROL);
  7575. udelay(200);
  7576. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7577. /* Enable CPU source on CPU attached eDP */
  7578. if (has_cpu_edp) {
  7579. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7580. DRM_DEBUG_KMS("Using SSC on eDP\n");
  7581. val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7582. } else
  7583. val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7584. } else
  7585. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7586. I915_WRITE(PCH_DREF_CONTROL, val);
  7587. POSTING_READ(PCH_DREF_CONTROL);
  7588. udelay(200);
  7589. } else {
  7590. DRM_DEBUG_KMS("Disabling CPU source output\n");
  7591. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7592. /* Turn off CPU output */
  7593. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7594. I915_WRITE(PCH_DREF_CONTROL, val);
  7595. POSTING_READ(PCH_DREF_CONTROL);
  7596. udelay(200);
  7597. if (!using_ssc_source) {
  7598. DRM_DEBUG_KMS("Disabling SSC source\n");
  7599. /* Turn off the SSC source */
  7600. val &= ~DREF_SSC_SOURCE_MASK;
  7601. val |= DREF_SSC_SOURCE_DISABLE;
  7602. /* Turn off SSC1 */
  7603. val &= ~DREF_SSC1_ENABLE;
  7604. I915_WRITE(PCH_DREF_CONTROL, val);
  7605. POSTING_READ(PCH_DREF_CONTROL);
  7606. udelay(200);
  7607. }
  7608. }
  7609. BUG_ON(val != final);
  7610. }
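/*
 * ironlake_init_pch_refclk() deliberately computes the final
 * PCH_DREF_CONTROL value first and returns early when nothing changes;
 * otherwise each source is switched in its own write with a POSTING_READ
 * and a 200 us delay, since the SSC and CPU outputs must be brought up or
 * down one step at a time. The BUG_ON at the end checks that the
 * incremental writes converged on the precomputed value.
 */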
  7611. static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
  7612. {
  7613. uint32_t tmp;
  7614. tmp = I915_READ(SOUTH_CHICKEN2);
  7615. tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
  7616. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7617. if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
  7618. FDI_MPHY_IOSFSB_RESET_STATUS, 100))
  7619. DRM_ERROR("FDI mPHY reset assert timeout\n");
  7620. tmp = I915_READ(SOUTH_CHICKEN2);
  7621. tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
  7622. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7623. if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
  7624. FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
  7625. DRM_ERROR("FDI mPHY reset de-assert timeout\n");
  7626. }
  7627. /* WaMPhyProgramming:hsw */
  7628. static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
  7629. {
  7630. uint32_t tmp;
  7631. tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
  7632. tmp &= ~(0xFF << 24);
  7633. tmp |= (0x12 << 24);
  7634. intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
  7635. tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
  7636. tmp |= (1 << 11);
  7637. intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
  7638. tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
  7639. tmp |= (1 << 11);
  7640. intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
  7641. tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
  7642. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7643. intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
  7644. tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
  7645. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7646. intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
  7647. tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
  7648. tmp &= ~(7 << 13);
  7649. tmp |= (5 << 13);
  7650. intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
  7651. tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
  7652. tmp &= ~(7 << 13);
  7653. tmp |= (5 << 13);
  7654. intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
  7655. tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
  7656. tmp &= ~0xFF;
  7657. tmp |= 0x1C;
  7658. intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
  7659. tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
  7660. tmp &= ~0xFF;
  7661. tmp |= 0x1C;
  7662. intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
  7663. tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
  7664. tmp &= ~(0xFF << 16);
  7665. tmp |= (0x1C << 16);
  7666. intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
  7667. tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
  7668. tmp &= ~(0xFF << 16);
  7669. tmp |= (0x1C << 16);
  7670. intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
  7671. tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
  7672. tmp |= (1 << 27);
  7673. intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
  7674. tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
  7675. tmp |= (1 << 27);
  7676. intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
  7677. tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
  7678. tmp &= ~(0xF << 28);
  7679. tmp |= (4 << 28);
  7680. intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
  7681. tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
  7682. tmp &= ~(0xF << 28);
  7683. tmp |= (4 << 28);
  7684. intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
  7685. }
  7686. /* Implements 3 different sequences from BSpec chapter "Display iCLK
  7687. * Programming" based on the parameters passed:
  7688. * - Sequence to enable CLKOUT_DP
  7689. * - Sequence to enable CLKOUT_DP without spread
  7690. * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
  7691. */
  7692. static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
  7693. bool with_fdi)
  7694. {
  7695. struct drm_i915_private *dev_priv = to_i915(dev);
  7696. uint32_t reg, tmp;
  7697. if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
  7698. with_spread = true;
  7699. if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
  7700. with_fdi, "LP PCH doesn't have FDI\n"))
  7701. with_fdi = false;
  7702. mutex_lock(&dev_priv->sb_lock);
  7703. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7704. tmp &= ~SBI_SSCCTL_DISABLE;
  7705. tmp |= SBI_SSCCTL_PATHALT;
  7706. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7707. udelay(24);
  7708. if (with_spread) {
  7709. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7710. tmp &= ~SBI_SSCCTL_PATHALT;
  7711. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7712. if (with_fdi) {
  7713. lpt_reset_fdi_mphy(dev_priv);
  7714. lpt_program_fdi_mphy(dev_priv);
  7715. }
  7716. }
  7717. reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
  7718. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7719. tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7720. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7721. mutex_unlock(&dev_priv->sb_lock);
  7722. }
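/*
 * The three BSpec sequences mentioned above map onto the two parameters
 * (sketch of the expected call forms, assuming the obvious callers):
 *
 *	lpt_enable_clkout_dp(dev, true,  true);   - CLKOUT_DP for FDI + PCH FDI I/O
 *	lpt_enable_clkout_dp(dev, true,  false);  - CLKOUT_DP with spread
 *	lpt_enable_clkout_dp(dev, false, false);  - CLKOUT_DP without spread
 *
 * The WARNs at the top enforce that FDI use implies downspread and that
 * LP PCH never asks for FDI.
 */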
  7723. /* Sequence to disable CLKOUT_DP */
  7724. static void lpt_disable_clkout_dp(struct drm_device *dev)
  7725. {
  7726. struct drm_i915_private *dev_priv = to_i915(dev);
  7727. uint32_t reg, tmp;
  7728. mutex_lock(&dev_priv->sb_lock);
  7729. reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
  7730. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7731. tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7732. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7733. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7734. if (!(tmp & SBI_SSCCTL_DISABLE)) {
  7735. if (!(tmp & SBI_SSCCTL_PATHALT)) {
  7736. tmp |= SBI_SSCCTL_PATHALT;
  7737. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7738. udelay(32);
  7739. }
  7740. tmp |= SBI_SSCCTL_DISABLE;
  7741. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7742. }
  7743. mutex_unlock(&dev_priv->sb_lock);
  7744. }
  7745. #define BEND_IDX(steps) ((50 + (steps)) / 5)
  7746. static const uint16_t sscdivintphase[] = {
  7747. [BEND_IDX( 50)] = 0x3B23,
  7748. [BEND_IDX( 45)] = 0x3B23,
  7749. [BEND_IDX( 40)] = 0x3C23,
  7750. [BEND_IDX( 35)] = 0x3C23,
  7751. [BEND_IDX( 30)] = 0x3D23,
  7752. [BEND_IDX( 25)] = 0x3D23,
  7753. [BEND_IDX( 20)] = 0x3E23,
  7754. [BEND_IDX( 15)] = 0x3E23,
  7755. [BEND_IDX( 10)] = 0x3F23,
  7756. [BEND_IDX( 5)] = 0x3F23,
  7757. [BEND_IDX( 0)] = 0x0025,
  7758. [BEND_IDX( -5)] = 0x0025,
  7759. [BEND_IDX(-10)] = 0x0125,
  7760. [BEND_IDX(-15)] = 0x0125,
  7761. [BEND_IDX(-20)] = 0x0225,
  7762. [BEND_IDX(-25)] = 0x0225,
  7763. [BEND_IDX(-30)] = 0x0325,
  7764. [BEND_IDX(-35)] = 0x0325,
  7765. [BEND_IDX(-40)] = 0x0425,
  7766. [BEND_IDX(-45)] = 0x0425,
  7767. [BEND_IDX(-50)] = 0x0525,
  7768. };
  7769. /*
  7770. * Bend CLKOUT_DP
  7771. * steps -50 to 50 inclusive, in steps of 5
  7772. * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
  7773. * change in clock period = -(steps / 10) * 5.787 ps
  7774. */
  7775. static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
  7776. {
  7777. uint32_t tmp;
  7778. int idx = BEND_IDX(steps);
  7779. if (WARN_ON(steps % 5 != 0))
  7780. return;
  7781. if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
  7782. return;
  7783. mutex_lock(&dev_priv->sb_lock);
  7784. if (steps % 10 != 0)
  7785. tmp = 0xAAAAAAAB;
  7786. else
  7787. tmp = 0x00000000;
  7788. intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
  7789. tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
  7790. tmp &= 0xffff0000;
  7791. tmp |= sscdivintphase[idx];
  7792. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
  7793. mutex_unlock(&dev_priv->sb_lock);
  7794. }
  7795. #undef BEND_IDX
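/*
 * Worked example for the bend table: steps = -20 gives
 * BEND_IDX(-20) = (50 - 20) / 5 = 6, i.e. SSCDIVINTPHASE value 0x0225,
 * and per the formula above the clock period grows by
 * -(-20 / 10) * 5.787 ps = +11.574 ps, slowing the clock slightly.
 */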
  7796. static void lpt_init_pch_refclk(struct drm_device *dev)
  7797. {
  7798. struct intel_encoder *encoder;
  7799. bool has_vga = false;
  7800. for_each_intel_encoder(dev, encoder) {
  7801. switch (encoder->type) {
  7802. case INTEL_OUTPUT_ANALOG:
  7803. has_vga = true;
  7804. break;
  7805. default:
  7806. break;
  7807. }
  7808. }
  7809. if (has_vga) {
  7810. lpt_bend_clkout_dp(to_i915(dev), 0);
  7811. lpt_enable_clkout_dp(dev, true, true);
  7812. } else {
  7813. lpt_disable_clkout_dp(dev);
  7814. }
  7815. }
  7816. /*
  7817. * Initialize reference clocks when the driver loads
  7818. */
  7819. void intel_init_pch_refclk(struct drm_device *dev)
  7820. {
  7821. struct drm_i915_private *dev_priv = to_i915(dev);
  7822. if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
  7823. ironlake_init_pch_refclk(dev);
  7824. else if (HAS_PCH_LPT(dev_priv))
  7825. lpt_init_pch_refclk(dev);
  7826. }
  7827. static void ironlake_set_pipeconf(struct drm_crtc *crtc)
  7828. {
  7829. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7830. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7831. int pipe = intel_crtc->pipe;
  7832. uint32_t val;
  7833. val = 0;
  7834. switch (intel_crtc->config->pipe_bpp) {
  7835. case 18:
  7836. val |= PIPECONF_6BPC;
  7837. break;
  7838. case 24:
  7839. val |= PIPECONF_8BPC;
  7840. break;
  7841. case 30:
  7842. val |= PIPECONF_10BPC;
  7843. break;
  7844. case 36:
  7845. val |= PIPECONF_12BPC;
  7846. break;
  7847. default:
  7848. /* Case prevented by intel_choose_pipe_bpp_dither. */
  7849. BUG();
  7850. }
  7851. if (intel_crtc->config->dither)
  7852. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7853. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7854. val |= PIPECONF_INTERLACED_ILK;
  7855. else
  7856. val |= PIPECONF_PROGRESSIVE;
  7857. if (intel_crtc->config->limited_color_range)
  7858. val |= PIPECONF_COLOR_RANGE_SELECT;
  7859. I915_WRITE(PIPECONF(pipe), val);
  7860. POSTING_READ(PIPECONF(pipe));
  7861. }
  7862. static void haswell_set_pipeconf(struct drm_crtc *crtc)
  7863. {
  7864. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7865. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7866. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  7867. u32 val = 0;
  7868. if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
  7869. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7870. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7871. val |= PIPECONF_INTERLACED_ILK;
  7872. else
  7873. val |= PIPECONF_PROGRESSIVE;
  7874. I915_WRITE(PIPECONF(cpu_transcoder), val);
  7875. POSTING_READ(PIPECONF(cpu_transcoder));
  7876. }
  7877. static void haswell_set_pipemisc(struct drm_crtc *crtc)
  7878. {
  7879. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  7880. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7881. if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
  7882. u32 val = 0;
  7883. switch (intel_crtc->config->pipe_bpp) {
  7884. case 18:
  7885. val |= PIPEMISC_DITHER_6_BPC;
  7886. break;
  7887. case 24:
  7888. val |= PIPEMISC_DITHER_8_BPC;
  7889. break;
  7890. case 30:
  7891. val |= PIPEMISC_DITHER_10_BPC;
  7892. break;
  7893. case 36:
  7894. val |= PIPEMISC_DITHER_12_BPC;
  7895. break;
  7896. default:
  7897. /* Case prevented by pipe_config_set_bpp. */
  7898. BUG();
  7899. }
  7900. if (intel_crtc->config->dither)
  7901. val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
  7902. I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
  7903. }
  7904. }
  7905. int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
  7906. {
  7907. /*
  7908. * Account for spread spectrum to avoid
  7909. * oversubscribing the link. Max center spread
  7910. * is 2.5%; use 5% for safety's sake.
  7911. */
  7912. u32 bps = target_clock * bpp * 21 / 20;
  7913. return DIV_ROUND_UP(bps, link_bw * 8);
  7914. }
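/*
 * Worked example (assumed numbers): a 148,500 kHz pixel clock at 24 bpp
 * gives bps = 148500 * 24 * 21 / 20 = 3,742,200, and on a 270,000 kHz
 * (2.7 GHz) link that is DIV_ROUND_UP(3742200, 2160000) = 2 lanes.
 */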
  7915. static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
  7916. {
  7917. return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
  7918. }
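/*
 * With the default factor of 21 used below, a PLL with n == 2 gets
 * FP_CB_TUNE whenever its effective m divider is below 42; the LVDS/SSC
 * and SDVO TV cases raise or lower the factor accordingly.
 */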
  7919. static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
  7920. struct intel_crtc_state *crtc_state,
  7921. struct dpll *reduced_clock)
  7922. {
  7923. struct drm_crtc *crtc = &intel_crtc->base;
  7924. struct drm_device *dev = crtc->dev;
  7925. struct drm_i915_private *dev_priv = to_i915(dev);
  7926. u32 dpll, fp, fp2;
  7927. int factor;
  7928. /* Enable autotuning of the PLL clock (if permissible) */
  7929. factor = 21;
  7930. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7931. if ((intel_panel_use_ssc(dev_priv) &&
  7932. dev_priv->vbt.lvds_ssc_freq == 100000) ||
  7933. (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
  7934. factor = 25;
  7935. } else if (crtc_state->sdvo_tv_clock)
  7936. factor = 20;
  7937. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  7938. if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
  7939. fp |= FP_CB_TUNE;
  7940. if (reduced_clock) {
  7941. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  7942. if (reduced_clock->m < factor * reduced_clock->n)
  7943. fp2 |= FP_CB_TUNE;
  7944. } else {
  7945. fp2 = fp;
  7946. }
  7947. dpll = 0;
  7948. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
  7949. dpll |= DPLLB_MODE_LVDS;
  7950. else
  7951. dpll |= DPLLB_MODE_DAC_SERIAL;
  7952. dpll |= (crtc_state->pixel_multiplier - 1)
  7953. << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
  7954. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
  7955. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
  7956. dpll |= DPLL_SDVO_HIGH_SPEED;
  7957. if (intel_crtc_has_dp_encoder(crtc_state))
  7958. dpll |= DPLL_SDVO_HIGH_SPEED;
  7959. /*
  7960. * The high speed IO clock is only really required for
  7961. * SDVO/HDMI/DP, but we also enable it for CRT to make it
  7962. * possible to share the DPLL between CRT and HDMI. Enabling
  7963. * the clock needlessly does no real harm, except use up a
  7964. * bit of power potentially.
  7965. *
  7966. * We'll limit this to IVB with 3 pipes, since it has only two
  7967. * DPLLs and so DPLL sharing is the only way to get three pipes
  7968. * driving PCH ports at the same time. On SNB we could do this,
  7969. * and potentially avoid enabling the second DPLL, but it's not
  7970. * clear if it's a win or loss power-wise. No point in doing
  7971. * this on ILK at all since it has a fixed DPLL<->pipe mapping.
  7972. */
  7973. if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
  7974. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
  7975. dpll |= DPLL_SDVO_HIGH_SPEED;
  7976. /* compute bitmask from p1 value */
  7977. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  7978. /* also FPA1 */
  7979. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  7980. switch (crtc_state->dpll.p2) {
  7981. case 5:
  7982. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  7983. break;
  7984. case 7:
  7985. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  7986. break;
  7987. case 10:
  7988. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  7989. break;
  7990. case 14:
  7991. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  7992. break;
  7993. }
  7994. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  7995. intel_panel_use_ssc(dev_priv))
  7996. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  7997. else
  7998. dpll |= PLL_REF_INPUT_DREFCLK;
  7999. dpll |= DPLL_VCO_ENABLE;
  8000. crtc_state->dpll_hw_state.dpll = dpll;
  8001. crtc_state->dpll_hw_state.fp0 = fp;
  8002. crtc_state->dpll_hw_state.fp1 = fp2;
  8003. }
  8004. static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
  8005. struct intel_crtc_state *crtc_state)
  8006. {
  8007. struct drm_device *dev = crtc->base.dev;
  8008. struct drm_i915_private *dev_priv = to_i915(dev);
  8009. struct dpll reduced_clock;
  8010. bool has_reduced_clock = false;
  8011. struct intel_shared_dpll *pll;
  8012. const struct intel_limit *limit;
  8013. int refclk = 120000;
  8014. memset(&crtc_state->dpll_hw_state, 0,
  8015. sizeof(crtc_state->dpll_hw_state));
  8016. crtc->lowfreq_avail = false;
  8017. /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
  8018. if (!crtc_state->has_pch_encoder)
  8019. return 0;
  8020. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  8021. if (intel_panel_use_ssc(dev_priv)) {
  8022. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
  8023. dev_priv->vbt.lvds_ssc_freq);
  8024. refclk = dev_priv->vbt.lvds_ssc_freq;
  8025. }
  8026. if (intel_is_dual_link_lvds(dev)) {
  8027. if (refclk == 100000)
  8028. limit = &intel_limits_ironlake_dual_lvds_100m;
  8029. else
  8030. limit = &intel_limits_ironlake_dual_lvds;
  8031. } else {
  8032. if (refclk == 100000)
  8033. limit = &intel_limits_ironlake_single_lvds_100m;
  8034. else
  8035. limit = &intel_limits_ironlake_single_lvds;
  8036. }
  8037. } else {
  8038. limit = &intel_limits_ironlake_dac;
  8039. }
  8040. if (!crtc_state->clock_set &&
  8041. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  8042. refclk, NULL, &crtc_state->dpll)) {
  8043. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  8044. return -EINVAL;
  8045. }
  8046. ironlake_compute_dpll(crtc, crtc_state,
  8047. has_reduced_clock ? &reduced_clock : NULL);
  8048. pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
  8049. if (pll == NULL) {
  8050. DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
  8051. pipe_name(crtc->pipe));
  8052. return -EINVAL;
  8053. }
  8054. if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  8055. has_reduced_clock)
  8056. crtc->lowfreq_avail = true;
  8057. return 0;
  8058. }
  8059. static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
  8060. struct intel_link_m_n *m_n)
  8061. {
  8062. struct drm_device *dev = crtc->base.dev;
  8063. struct drm_i915_private *dev_priv = to_i915(dev);
  8064. enum pipe pipe = crtc->pipe;
  8065. m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
  8066. m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
  8067. m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
  8068. & ~TU_SIZE_MASK;
  8069. m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
  8070. m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
  8071. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8072. }
  8073. static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
  8074. enum transcoder transcoder,
  8075. struct intel_link_m_n *m_n,
  8076. struct intel_link_m_n *m2_n2)
  8077. {
  8078. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8079. enum pipe pipe = crtc->pipe;
  8080. if (INTEL_GEN(dev_priv) >= 5) {
  8081. m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
  8082. m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
  8083. m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
  8084. & ~TU_SIZE_MASK;
  8085. m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
  8086. m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
  8087. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8088. /* Read the M2_N2 registers only on gen < 8 (where they exist) and
  8089. * only when DRRS is supported, so that the registers are not read
  8090. * unnecessarily.
  8091. */
  8092. if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
  8093. crtc->config->has_drrs) {
  8094. m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
  8095. m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
  8096. m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
  8097. & ~TU_SIZE_MASK;
  8098. m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
  8099. m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
  8100. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8101. }
  8102. } else {
  8103. m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
  8104. m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
  8105. m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
  8106. & ~TU_SIZE_MASK;
  8107. m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
  8108. m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
  8109. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  8110. }
  8111. }
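/*
 * DP link M/N readout: with a PCH encoder the values live in the PCH
 * transcoder registers, otherwise they come from the CPU transcoder
 * (including the second M2/N2 set used for DRRS on gen < 8).
 */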
  8112. void intel_dp_get_m_n(struct intel_crtc *crtc,
  8113. struct intel_crtc_state *pipe_config)
  8114. {
  8115. if (pipe_config->has_pch_encoder)
  8116. intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
  8117. else
  8118. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  8119. &pipe_config->dp_m_n,
  8120. &pipe_config->dp_m2_n2);
  8121. }
  8122. static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
  8123. struct intel_crtc_state *pipe_config)
  8124. {
  8125. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  8126. &pipe_config->fdi_m_n, NULL);
  8127. }
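/*
 * SKL+ pipe scaler readout: walk this pipe's scalers and pick the one that
 * is enabled and bound to the pipe itself (plane select field clear),
 * recording its window position/size as the panel fitter state.
 */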
  8128. static void skylake_get_pfit_config(struct intel_crtc *crtc,
  8129. struct intel_crtc_state *pipe_config)
  8130. {
  8131. struct drm_device *dev = crtc->base.dev;
  8132. struct drm_i915_private *dev_priv = to_i915(dev);
  8133. struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
  8134. uint32_t ps_ctrl = 0;
  8135. int id = -1;
  8136. int i;
  8137. /* find scaler attached to this pipe */
  8138. for (i = 0; i < crtc->num_scalers; i++) {
  8139. ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
  8140. if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
  8141. id = i;
  8142. pipe_config->pch_pfit.enabled = true;
  8143. pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
  8144. pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
  8145. break;
  8146. }
  8147. }
  8148. scaler_state->scaler_id = id;
  8149. if (id >= 0) {
  8150. scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
  8151. } else {
  8152. scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8153. }
  8154. }
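/*
 * Read back the configuration of whatever framebuffer is already enabled on
 * the pipe's primary plane (format, tiling, stride, size) into plane_config.
 */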
  8155. static void
  8156. skylake_get_initial_plane_config(struct intel_crtc *crtc,
  8157. struct intel_initial_plane_config *plane_config)
  8158. {
  8159. struct drm_device *dev = crtc->base.dev;
  8160. struct drm_i915_private *dev_priv = to_i915(dev);
  8161. u32 val, base, offset, stride_mult, tiling;
  8162. int pipe = crtc->pipe;
  8163. int fourcc, pixel_format;
  8164. unsigned int aligned_height;
  8165. struct drm_framebuffer *fb;
  8166. struct intel_framebuffer *intel_fb;
  8167. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  8168. if (!intel_fb) {
  8169. DRM_DEBUG_KMS("failed to alloc fb\n");
  8170. return;
  8171. }
  8172. fb = &intel_fb->base;
  8173. val = I915_READ(PLANE_CTL(pipe, 0));
  8174. if (!(val & PLANE_CTL_ENABLE))
  8175. goto error;
  8176. pixel_format = val & PLANE_CTL_FORMAT_MASK;
  8177. fourcc = skl_format_to_fourcc(pixel_format,
  8178. val & PLANE_CTL_ORDER_RGBX,
  8179. val & PLANE_CTL_ALPHA_MASK);
  8180. fb->pixel_format = fourcc;
  8181. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  8182. tiling = val & PLANE_CTL_TILED_MASK;
  8183. switch (tiling) {
  8184. case PLANE_CTL_TILED_LINEAR:
  8185. fb->modifier[0] = DRM_FORMAT_MOD_NONE;
  8186. break;
  8187. case PLANE_CTL_TILED_X:
  8188. plane_config->tiling = I915_TILING_X;
  8189. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  8190. break;
  8191. case PLANE_CTL_TILED_Y:
  8192. fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
  8193. break;
  8194. case PLANE_CTL_TILED_YF:
  8195. fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
  8196. break;
  8197. default:
  8198. MISSING_CASE(tiling);
  8199. goto error;
  8200. }
  8201. base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
  8202. plane_config->base = base;
  8203. offset = I915_READ(PLANE_OFFSET(pipe, 0));
  8204. val = I915_READ(PLANE_SIZE(pipe, 0));
  8205. fb->height = ((val >> 16) & 0xfff) + 1;
  8206. fb->width = ((val >> 0) & 0x1fff) + 1;
  8207. val = I915_READ(PLANE_STRIDE(pipe, 0));
  8208. stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  8209. fb->pixel_format);
  8210. fb->pitches[0] = (val & 0x3ff) * stride_mult;
  8211. aligned_height = intel_fb_align_height(dev, fb->height,
  8212. fb->pixel_format,
  8213. fb->modifier[0]);
  8214. plane_config->size = fb->pitches[0] * aligned_height;
  8215. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  8216. pipe_name(pipe), fb->width, fb->height,
  8217. fb->bits_per_pixel, base, fb->pitches[0],
  8218. plane_config->size);
  8219. plane_config->fb = intel_fb;
  8220. return;
  8221. error:
  8222. kfree(intel_fb);
  8223. }
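/*
 * ILK-style panel fitter readout: a single PF_CTL per pipe; the window
 * position/size registers are only read while the fitter is enabled.
 */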
  8224. static void ironlake_get_pfit_config(struct intel_crtc *crtc,
  8225. struct intel_crtc_state *pipe_config)
  8226. {
  8227. struct drm_device *dev = crtc->base.dev;
  8228. struct drm_i915_private *dev_priv = to_i915(dev);
  8229. uint32_t tmp;
  8230. tmp = I915_READ(PF_CTL(crtc->pipe));
  8231. if (tmp & PF_ENABLE) {
  8232. pipe_config->pch_pfit.enabled = true;
  8233. pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
  8234. pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8235. /* We currently do not free assignments of panel fitters on
8236. * ivb/hsw (since we don't use the higher upscaling modes which
8237. * differentiate them), so just WARN about this case for now. */
  8238. if (IS_GEN7(dev_priv)) {
  8239. WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
  8240. PF_PIPE_SEL_IVB(crtc->pipe));
  8241. }
  8242. }
  8243. }
  8244. static void
  8245. ironlake_get_initial_plane_config(struct intel_crtc *crtc,
  8246. struct intel_initial_plane_config *plane_config)
  8247. {
  8248. struct drm_device *dev = crtc->base.dev;
  8249. struct drm_i915_private *dev_priv = to_i915(dev);
  8250. u32 val, base, offset;
  8251. int pipe = crtc->pipe;
  8252. int fourcc, pixel_format;
  8253. unsigned int aligned_height;
  8254. struct drm_framebuffer *fb;
  8255. struct intel_framebuffer *intel_fb;
  8256. val = I915_READ(DSPCNTR(pipe));
  8257. if (!(val & DISPLAY_PLANE_ENABLE))
  8258. return;
  8259. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  8260. if (!intel_fb) {
  8261. DRM_DEBUG_KMS("failed to alloc fb\n");
  8262. return;
  8263. }
  8264. fb = &intel_fb->base;
  8265. if (INTEL_GEN(dev_priv) >= 4) {
  8266. if (val & DISPPLANE_TILED) {
  8267. plane_config->tiling = I915_TILING_X;
  8268. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  8269. }
  8270. }
  8271. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  8272. fourcc = i9xx_format_to_fourcc(pixel_format);
  8273. fb->pixel_format = fourcc;
  8274. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  8275. base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
  8276. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
  8277. offset = I915_READ(DSPOFFSET(pipe));
  8278. } else {
  8279. if (plane_config->tiling)
  8280. offset = I915_READ(DSPTILEOFF(pipe));
  8281. else
  8282. offset = I915_READ(DSPLINOFF(pipe));
  8283. }
  8284. plane_config->base = base;
  8285. val = I915_READ(PIPESRC(pipe));
  8286. fb->width = ((val >> 16) & 0xfff) + 1;
  8287. fb->height = ((val >> 0) & 0xfff) + 1;
  8288. val = I915_READ(DSPSTRIDE(pipe));
  8289. fb->pitches[0] = val & 0xffffffc0;
  8290. aligned_height = intel_fb_align_height(dev, fb->height,
  8291. fb->pixel_format,
  8292. fb->modifier[0]);
  8293. plane_config->size = fb->pitches[0] * aligned_height;
  8294. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  8295. pipe_name(pipe), fb->width, fb->height,
  8296. fb->bits_per_pixel, base, fb->pitches[0],
  8297. plane_config->size);
  8298. plane_config->fb = intel_fb;
  8299. }
  8300. static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  8301. struct intel_crtc_state *pipe_config)
  8302. {
  8303. struct drm_device *dev = crtc->base.dev;
  8304. struct drm_i915_private *dev_priv = to_i915(dev);
  8305. enum intel_display_power_domain power_domain;
  8306. uint32_t tmp;
  8307. bool ret;
  8308. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  8309. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8310. return false;
  8311. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  8312. pipe_config->shared_dpll = NULL;
  8313. ret = false;
  8314. tmp = I915_READ(PIPECONF(crtc->pipe));
  8315. if (!(tmp & PIPECONF_ENABLE))
  8316. goto out;
  8317. switch (tmp & PIPECONF_BPC_MASK) {
  8318. case PIPECONF_6BPC:
  8319. pipe_config->pipe_bpp = 18;
  8320. break;
  8321. case PIPECONF_8BPC:
  8322. pipe_config->pipe_bpp = 24;
  8323. break;
  8324. case PIPECONF_10BPC:
  8325. pipe_config->pipe_bpp = 30;
  8326. break;
  8327. case PIPECONF_12BPC:
  8328. pipe_config->pipe_bpp = 36;
  8329. break;
  8330. default:
  8331. break;
  8332. }
  8333. if (tmp & PIPECONF_COLOR_RANGE_SELECT)
  8334. pipe_config->limited_color_range = true;
  8335. if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
  8336. struct intel_shared_dpll *pll;
  8337. enum intel_dpll_id pll_id;
  8338. pipe_config->has_pch_encoder = true;
  8339. tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
  8340. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  8341. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  8342. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  8343. if (HAS_PCH_IBX(dev_priv)) {
  8344. /*
  8345. * The pipe->pch transcoder and pch transcoder->pll
  8346. * mapping is fixed.
  8347. */
  8348. pll_id = (enum intel_dpll_id) crtc->pipe;
  8349. } else {
  8350. tmp = I915_READ(PCH_DPLL_SEL);
  8351. if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
  8352. pll_id = DPLL_ID_PCH_PLL_B;
  8353. else
8354. pll_id = DPLL_ID_PCH_PLL_A;
  8355. }
  8356. pipe_config->shared_dpll =
  8357. intel_get_shared_dpll_by_id(dev_priv, pll_id);
  8358. pll = pipe_config->shared_dpll;
  8359. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  8360. &pipe_config->dpll_hw_state));
  8361. tmp = pipe_config->dpll_hw_state.dpll;
  8362. pipe_config->pixel_multiplier =
  8363. ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
  8364. >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
  8365. ironlake_pch_clock_get(crtc, pipe_config);
  8366. } else {
  8367. pipe_config->pixel_multiplier = 1;
  8368. }
  8369. intel_get_pipe_timings(crtc, pipe_config);
  8370. intel_get_pipe_src_size(crtc, pipe_config);
  8371. ironlake_get_pfit_config(crtc, pipe_config);
  8372. ret = true;
  8373. out:
  8374. intel_display_power_put(dev_priv, power_domain);
  8375. return ret;
  8376. }
  8377. static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  8378. {
  8379. struct drm_device *dev = &dev_priv->drm;
  8380. struct intel_crtc *crtc;
  8381. for_each_intel_crtc(dev, crtc)
  8382. I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
  8383. pipe_name(crtc->pipe));
  8384. I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
  8385. I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
  8386. I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
  8387. I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
  8388. I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
  8389. I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
  8390. "CPU PWM1 enabled\n");
  8391. if (IS_HASWELL(dev_priv))
  8392. I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
  8393. "CPU PWM2 enabled\n");
  8394. I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
  8395. "PCH PWM1 enabled\n");
  8396. I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
  8397. "Utility pin enabled\n");
  8398. I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
  8399. /*
  8400. * In theory we can still leave IRQs enabled, as long as only the HPD
  8401. * interrupts remain enabled. We used to check for that, but since it's
  8402. * gen-specific and since we only disable LCPLL after we fully disable
  8403. * the interrupts, the check below should be enough.
  8404. */
  8405. I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
  8406. }
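/*
 * D_COMP is a different register on HSW (D_COMP_HSW) and BDW (D_COMP_BDW),
 * and on HSW writes have to go through the pcode mailbox rather than a
 * plain MMIO write (see below).
 */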
  8407. static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
  8408. {
  8409. if (IS_HASWELL(dev_priv))
  8410. return I915_READ(D_COMP_HSW);
  8411. else
  8412. return I915_READ(D_COMP_BDW);
  8413. }
  8414. static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
  8415. {
  8416. if (IS_HASWELL(dev_priv)) {
  8417. mutex_lock(&dev_priv->rps.hw_lock);
  8418. if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
  8419. val))
  8420. DRM_DEBUG_KMS("Failed to write to D_COMP\n");
  8421. mutex_unlock(&dev_priv->rps.hw_lock);
  8422. } else {
  8423. I915_WRITE(D_COMP_BDW, val);
  8424. POSTING_READ(D_COMP_BDW);
  8425. }
  8426. }
  8427. /*
  8428. * This function implements pieces of two sequences from BSpec:
  8429. * - Sequence for display software to disable LCPLL
  8430. * - Sequence for display software to allow package C8+
  8431. * The steps implemented here are just the steps that actually touch the LCPLL
  8432. * register. Callers should take care of disabling all the display engine
  8433. * functions, doing the mode unset, fixing interrupts, etc.
  8434. */
  8435. static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  8436. bool switch_to_fclk, bool allow_power_down)
  8437. {
  8438. uint32_t val;
  8439. assert_can_disable_lcpll(dev_priv);
  8440. val = I915_READ(LCPLL_CTL);
  8441. if (switch_to_fclk) {
  8442. val |= LCPLL_CD_SOURCE_FCLK;
  8443. I915_WRITE(LCPLL_CTL, val);
  8444. if (wait_for_us(I915_READ(LCPLL_CTL) &
  8445. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  8446. DRM_ERROR("Switching to FCLK failed\n");
  8447. val = I915_READ(LCPLL_CTL);
  8448. }
  8449. val |= LCPLL_PLL_DISABLE;
  8450. I915_WRITE(LCPLL_CTL, val);
  8451. POSTING_READ(LCPLL_CTL);
  8452. if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
  8453. DRM_ERROR("LCPLL still locked\n");
  8454. val = hsw_read_dcomp(dev_priv);
  8455. val |= D_COMP_COMP_DISABLE;
  8456. hsw_write_dcomp(dev_priv, val);
  8457. ndelay(100);
  8458. if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
  8459. 1))
  8460. DRM_ERROR("D_COMP RCOMP still in progress\n");
  8461. if (allow_power_down) {
  8462. val = I915_READ(LCPLL_CTL);
  8463. val |= LCPLL_POWER_DOWN_ALLOW;
  8464. I915_WRITE(LCPLL_CTL, val);
  8465. POSTING_READ(LCPLL_CTL);
  8466. }
  8467. }
  8468. /*
  8469. * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  8470. * source.
  8471. */
  8472. static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  8473. {
  8474. uint32_t val;
  8475. val = I915_READ(LCPLL_CTL);
  8476. if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
  8477. LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
  8478. return;
  8479. /*
  8480. * Make sure we're not on PC8 state before disabling PC8, otherwise
  8481. * we'll hang the machine. To prevent PC8 state, just enable force_wake.
  8482. */
  8483. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  8484. if (val & LCPLL_POWER_DOWN_ALLOW) {
  8485. val &= ~LCPLL_POWER_DOWN_ALLOW;
  8486. I915_WRITE(LCPLL_CTL, val);
  8487. POSTING_READ(LCPLL_CTL);
  8488. }
  8489. val = hsw_read_dcomp(dev_priv);
  8490. val |= D_COMP_COMP_FORCE;
  8491. val &= ~D_COMP_COMP_DISABLE;
  8492. hsw_write_dcomp(dev_priv, val);
  8493. val = I915_READ(LCPLL_CTL);
  8494. val &= ~LCPLL_PLL_DISABLE;
  8495. I915_WRITE(LCPLL_CTL, val);
  8496. if (intel_wait_for_register(dev_priv,
  8497. LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
  8498. 5))
  8499. DRM_ERROR("LCPLL not locked yet\n");
  8500. if (val & LCPLL_CD_SOURCE_FCLK) {
  8501. val = I915_READ(LCPLL_CTL);
  8502. val &= ~LCPLL_CD_SOURCE_FCLK;
  8503. I915_WRITE(LCPLL_CTL, val);
  8504. if (wait_for_us((I915_READ(LCPLL_CTL) &
  8505. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  8506. DRM_ERROR("Switching back to LCPLL failed\n");
  8507. }
  8508. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  8509. intel_update_cdclk(dev_priv);
  8510. }
  8511. /*
  8512. * Package states C8 and deeper are really deep PC states that can only be
  8513. * reached when all the devices on the system allow it, so even if the graphics
  8514. * device allows PC8+, it doesn't mean the system will actually get to these
  8515. * states. Our driver only allows PC8+ when going into runtime PM.
  8516. *
  8517. * The requirements for PC8+ are that all the outputs are disabled, the power
  8518. * well is disabled and most interrupts are disabled, and these are also
  8519. * requirements for runtime PM. When these conditions are met, we manually do
  8520. * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
8521. * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
  8522. * hang the machine.
  8523. *
  8524. * When we really reach PC8 or deeper states (not just when we allow it) we lose
  8525. * the state of some registers, so when we come back from PC8+ we need to
  8526. * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
  8527. * need to take care of the registers kept by RC6. Notice that this happens even
  8528. * if we don't put the device in PCI D3 state (which is what currently happens
  8529. * because of the runtime PM support).
  8530. *
  8531. * For more, read "Display Sequences for Package C8" on the hardware
  8532. * documentation.
  8533. */
  8534. void hsw_enable_pc8(struct drm_i915_private *dev_priv)
  8535. {
  8536. struct drm_device *dev = &dev_priv->drm;
  8537. uint32_t val;
  8538. DRM_DEBUG_KMS("Enabling package C8+\n");
  8539. if (HAS_PCH_LPT_LP(dev_priv)) {
  8540. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8541. val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
  8542. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8543. }
  8544. lpt_disable_clkout_dp(dev);
  8545. hsw_disable_lcpll(dev_priv, true, true);
  8546. }
  8547. void hsw_disable_pc8(struct drm_i915_private *dev_priv)
  8548. {
  8549. struct drm_device *dev = &dev_priv->drm;
  8550. uint32_t val;
  8551. DRM_DEBUG_KMS("Disabling package C8+\n");
  8552. hsw_restore_lcpll(dev_priv);
  8553. lpt_init_pch_refclk(dev);
  8554. if (HAS_PCH_LPT_LP(dev_priv)) {
  8555. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8556. val |= PCH_LP_PARTITION_LEVEL_DISABLE;
  8557. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8558. }
  8559. }
  8560. static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8561. {
  8562. struct drm_device *dev = old_state->dev;
  8563. struct intel_atomic_state *old_intel_state =
  8564. to_intel_atomic_state(old_state);
  8565. unsigned int req_cdclk = old_intel_state->dev_cdclk;
  8566. bxt_set_cdclk(to_i915(dev), req_cdclk);
  8567. }
  8568. static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
  8569. int pixel_rate)
  8570. {
  8571. struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
  8572. /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
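/* e.g. a 400000 kHz pixel rate then needs a cdclk of at least
 * DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz (illustrative numbers).
 */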
  8573. if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
  8574. pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
  8575. /* BSpec says "Do not use DisplayPort with CDCLK less than
  8576. * 432 MHz, audio enabled, port width x4, and link rate
  8577. * HBR2 (5.4 GHz), or else there may be audio corruption or
  8578. * screen corruption."
  8579. */
  8580. if (intel_crtc_has_dp_encoder(crtc_state) &&
  8581. crtc_state->has_audio &&
  8582. crtc_state->port_clock >= 540000 &&
  8583. crtc_state->lane_count == 4)
  8584. pixel_rate = max(432000, pixel_rate);
  8585. return pixel_rate;
  8586. }
  8587. /* compute the max rate for new configuration */
  8588. static int ilk_max_pixel_rate(struct drm_atomic_state *state)
  8589. {
  8590. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8591. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8592. struct drm_crtc *crtc;
  8593. struct drm_crtc_state *cstate;
  8594. struct intel_crtc_state *crtc_state;
  8595. unsigned max_pixel_rate = 0, i;
  8596. enum pipe pipe;
  8597. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  8598. sizeof(intel_state->min_pixclk));
  8599. for_each_crtc_in_state(state, crtc, cstate, i) {
  8600. int pixel_rate;
  8601. crtc_state = to_intel_crtc_state(cstate);
  8602. if (!crtc_state->base.enable) {
  8603. intel_state->min_pixclk[i] = 0;
  8604. continue;
  8605. }
  8606. pixel_rate = ilk_pipe_pixel_rate(crtc_state);
  8607. if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
  8608. pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
  8609. pixel_rate);
  8610. intel_state->min_pixclk[i] = pixel_rate;
  8611. }
  8612. for_each_pipe(dev_priv, pipe)
  8613. max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
  8614. return max_pixel_rate;
  8615. }
  8616. static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
  8617. {
  8618. struct drm_i915_private *dev_priv = to_i915(dev);
  8619. uint32_t val, data;
  8620. int ret;
  8621. if (WARN((I915_READ(LCPLL_CTL) &
  8622. (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
  8623. LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
  8624. LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
  8625. LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
  8626. "trying to change cdclk frequency with cdclk not enabled\n"))
  8627. return;
  8628. mutex_lock(&dev_priv->rps.hw_lock);
  8629. ret = sandybridge_pcode_write(dev_priv,
  8630. BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
  8631. mutex_unlock(&dev_priv->rps.hw_lock);
  8632. if (ret) {
  8633. DRM_ERROR("failed to inform pcode about cdclk change\n");
  8634. return;
  8635. }
  8636. val = I915_READ(LCPLL_CTL);
  8637. val |= LCPLL_CD_SOURCE_FCLK;
  8638. I915_WRITE(LCPLL_CTL, val);
  8639. if (wait_for_us(I915_READ(LCPLL_CTL) &
  8640. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  8641. DRM_ERROR("Switching to FCLK failed\n");
  8642. val = I915_READ(LCPLL_CTL);
  8643. val &= ~LCPLL_CLK_FREQ_MASK;
  8644. switch (cdclk) {
  8645. case 450000:
  8646. val |= LCPLL_CLK_FREQ_450;
  8647. data = 0;
  8648. break;
  8649. case 540000:
  8650. val |= LCPLL_CLK_FREQ_54O_BDW;
  8651. data = 1;
  8652. break;
  8653. case 337500:
  8654. val |= LCPLL_CLK_FREQ_337_5_BDW;
  8655. data = 2;
  8656. break;
  8657. case 675000:
  8658. val |= LCPLL_CLK_FREQ_675_BDW;
  8659. data = 3;
  8660. break;
  8661. default:
  8662. WARN(1, "invalid cdclk frequency\n");
  8663. return;
  8664. }
  8665. I915_WRITE(LCPLL_CTL, val);
  8666. val = I915_READ(LCPLL_CTL);
  8667. val &= ~LCPLL_CD_SOURCE_FCLK;
  8668. I915_WRITE(LCPLL_CTL, val);
  8669. if (wait_for_us((I915_READ(LCPLL_CTL) &
  8670. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  8671. DRM_ERROR("Switching back to LCPLL failed\n");
  8672. mutex_lock(&dev_priv->rps.hw_lock);
  8673. sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
  8674. mutex_unlock(&dev_priv->rps.hw_lock);
  8675. I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
  8676. intel_update_cdclk(dev_priv);
  8677. WARN(cdclk != dev_priv->cdclk_freq,
  8678. "cdclk requested %d kHz but got %d kHz\n",
  8679. cdclk, dev_priv->cdclk_freq);
  8680. }
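/*
 * Pick the lowest of BDW's fixed cdclk steps that is at or above the max
 * pixel rate (the caller rejects anything above the platform's max cdclk),
 * e.g. a 400000 kHz max pixel clock selects 450000 kHz (illustrative).
 */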
  8681. static int broadwell_calc_cdclk(int max_pixclk)
  8682. {
  8683. if (max_pixclk > 540000)
  8684. return 675000;
  8685. else if (max_pixclk > 450000)
  8686. return 540000;
  8687. else if (max_pixclk > 337500)
  8688. return 450000;
  8689. else
  8690. return 337500;
  8691. }
  8692. static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
  8693. {
  8694. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8695. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8696. int max_pixclk = ilk_max_pixel_rate(state);
  8697. int cdclk;
  8698. /*
  8699. * FIXME should also account for plane ratio
  8700. * once 64bpp pixel formats are supported.
  8701. */
  8702. cdclk = broadwell_calc_cdclk(max_pixclk);
  8703. if (cdclk > dev_priv->max_cdclk_freq) {
  8704. DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  8705. cdclk, dev_priv->max_cdclk_freq);
  8706. return -EINVAL;
  8707. }
  8708. intel_state->cdclk = intel_state->dev_cdclk = cdclk;
  8709. if (!intel_state->active_crtcs)
  8710. intel_state->dev_cdclk = broadwell_calc_cdclk(0);
  8711. return 0;
  8712. }
  8713. static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8714. {
  8715. struct drm_device *dev = old_state->dev;
  8716. struct intel_atomic_state *old_intel_state =
  8717. to_intel_atomic_state(old_state);
  8718. unsigned req_cdclk = old_intel_state->dev_cdclk;
  8719. broadwell_set_cdclk(dev, req_cdclk);
  8720. }
  8721. static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
  8722. {
  8723. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8724. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8725. const int max_pixclk = ilk_max_pixel_rate(state);
  8726. int vco = intel_state->cdclk_pll_vco;
  8727. int cdclk;
  8728. /*
  8729. * FIXME should also account for plane ratio
  8730. * once 64bpp pixel formats are supported.
  8731. */
  8732. cdclk = skl_calc_cdclk(max_pixclk, vco);
  8733. /*
8734. * FIXME move the cdclk calculation to
8735. * compute_config() so we can fail gracefully.
  8736. */
  8737. if (cdclk > dev_priv->max_cdclk_freq) {
  8738. DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  8739. cdclk, dev_priv->max_cdclk_freq);
  8740. cdclk = dev_priv->max_cdclk_freq;
  8741. }
  8742. intel_state->cdclk = intel_state->dev_cdclk = cdclk;
  8743. if (!intel_state->active_crtcs)
  8744. intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
  8745. return 0;
  8746. }
  8747. static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8748. {
  8749. struct drm_i915_private *dev_priv = to_i915(old_state->dev);
  8750. struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
  8751. unsigned int req_cdclk = intel_state->dev_cdclk;
  8752. unsigned int req_vco = intel_state->cdclk_pll_vco;
  8753. skl_set_cdclk(dev_priv, req_cdclk, req_vco);
  8754. }
  8755. static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
  8756. struct intel_crtc_state *crtc_state)
  8757. {
  8758. if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
  8759. if (!intel_ddi_pll_select(crtc, crtc_state))
  8760. return -EINVAL;
  8761. }
  8762. crtc->lowfreq_avail = false;
  8763. return 0;
  8764. }
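/*
 * DDI clock readout helpers: map the clock source currently selected for a
 * port back to a shared DPLL id, so state readout can fetch that PLL's
 * hardware state.
 */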
  8765. static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
  8766. enum port port,
  8767. struct intel_crtc_state *pipe_config)
  8768. {
  8769. enum intel_dpll_id id;
  8770. switch (port) {
  8771. case PORT_A:
  8772. id = DPLL_ID_SKL_DPLL0;
  8773. break;
  8774. case PORT_B:
  8775. id = DPLL_ID_SKL_DPLL1;
  8776. break;
  8777. case PORT_C:
  8778. id = DPLL_ID_SKL_DPLL2;
  8779. break;
  8780. default:
  8781. DRM_ERROR("Incorrect port type\n");
  8782. return;
  8783. }
  8784. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8785. }
  8786. static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
  8787. enum port port,
  8788. struct intel_crtc_state *pipe_config)
  8789. {
  8790. enum intel_dpll_id id;
  8791. u32 temp;
  8792. temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
  8793. id = temp >> (port * 3 + 1);
  8794. if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
  8795. return;
  8796. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8797. }
  8798. static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
  8799. enum port port,
  8800. struct intel_crtc_state *pipe_config)
  8801. {
  8802. enum intel_dpll_id id;
  8803. uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
  8804. switch (ddi_pll_sel) {
  8805. case PORT_CLK_SEL_WRPLL1:
  8806. id = DPLL_ID_WRPLL1;
  8807. break;
  8808. case PORT_CLK_SEL_WRPLL2:
  8809. id = DPLL_ID_WRPLL2;
  8810. break;
  8811. case PORT_CLK_SEL_SPLL:
  8812. id = DPLL_ID_SPLL;
  8813. break;
  8814. case PORT_CLK_SEL_LCPLL_810:
  8815. id = DPLL_ID_LCPLL_810;
  8816. break;
  8817. case PORT_CLK_SEL_LCPLL_1350:
  8818. id = DPLL_ID_LCPLL_1350;
  8819. break;
  8820. case PORT_CLK_SEL_LCPLL_2700:
  8821. id = DPLL_ID_LCPLL_2700;
  8822. break;
  8823. default:
  8824. MISSING_CASE(ddi_pll_sel);
  8825. /* fall through */
  8826. case PORT_CLK_SEL_NONE:
  8827. return;
  8828. }
  8829. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8830. }
  8831. static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  8832. struct intel_crtc_state *pipe_config,
  8833. unsigned long *power_domain_mask)
  8834. {
  8835. struct drm_device *dev = crtc->base.dev;
  8836. struct drm_i915_private *dev_priv = to_i915(dev);
  8837. enum intel_display_power_domain power_domain;
  8838. u32 tmp;
  8839. /*
  8840. * The pipe->transcoder mapping is fixed with the exception of the eDP
  8841. * transcoder handled below.
  8842. */
  8843. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  8844. /*
  8845. * XXX: Do intel_display_power_get_if_enabled before reading this (for
  8846. * consistency and less surprising code; it's in always on power).
  8847. */
  8848. tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
  8849. if (tmp & TRANS_DDI_FUNC_ENABLE) {
  8850. enum pipe trans_edp_pipe;
  8851. switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
  8852. default:
  8853. WARN(1, "unknown pipe linked to edp transcoder\n");
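/* fallthrough - treat an unknown setting as pipe A */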
  8854. case TRANS_DDI_EDP_INPUT_A_ONOFF:
  8855. case TRANS_DDI_EDP_INPUT_A_ON:
  8856. trans_edp_pipe = PIPE_A;
  8857. break;
  8858. case TRANS_DDI_EDP_INPUT_B_ONOFF:
  8859. trans_edp_pipe = PIPE_B;
  8860. break;
  8861. case TRANS_DDI_EDP_INPUT_C_ONOFF:
  8862. trans_edp_pipe = PIPE_C;
  8863. break;
  8864. }
  8865. if (trans_edp_pipe == crtc->pipe)
  8866. pipe_config->cpu_transcoder = TRANSCODER_EDP;
  8867. }
  8868. power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
  8869. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8870. return false;
  8871. *power_domain_mask |= BIT(power_domain);
  8872. tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
  8873. return tmp & PIPECONF_ENABLE;
  8874. }
  8875. static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  8876. struct intel_crtc_state *pipe_config,
  8877. unsigned long *power_domain_mask)
  8878. {
  8879. struct drm_device *dev = crtc->base.dev;
  8880. struct drm_i915_private *dev_priv = to_i915(dev);
  8881. enum intel_display_power_domain power_domain;
  8882. enum port port;
  8883. enum transcoder cpu_transcoder;
  8884. u32 tmp;
  8885. for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
  8886. if (port == PORT_A)
  8887. cpu_transcoder = TRANSCODER_DSI_A;
  8888. else
  8889. cpu_transcoder = TRANSCODER_DSI_C;
  8890. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  8891. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8892. continue;
  8893. *power_domain_mask |= BIT(power_domain);
  8894. /*
  8895. * The PLL needs to be enabled with a valid divider
  8896. * configuration, otherwise accessing DSI registers will hang
  8897. * the machine. See BSpec North Display Engine
  8898. * registers/MIPI[BXT]. We can break out here early, since we
  8899. * need the same DSI PLL to be enabled for both DSI ports.
  8900. */
  8901. if (!intel_dsi_pll_is_enabled(dev_priv))
  8902. break;
  8903. /* XXX: this works for video mode only */
  8904. tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
  8905. if (!(tmp & DPI_ENABLE))
  8906. continue;
  8907. tmp = I915_READ(MIPI_CTRL(port));
  8908. if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
  8909. continue;
  8910. pipe_config->cpu_transcoder = cpu_transcoder;
  8911. break;
  8912. }
  8913. return transcoder_is_dsi(pipe_config->cpu_transcoder);
  8914. }
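/*
 * Work out which port the transcoder is driving from TRANS_DDI_FUNC_CTL,
 * look up the PLL feeding that port, and detect the LPT FDI/PCH path used
 * by DDI E.
 */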
  8915. static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  8916. struct intel_crtc_state *pipe_config)
  8917. {
  8918. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8919. struct intel_shared_dpll *pll;
  8920. enum port port;
  8921. uint32_t tmp;
  8922. tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
  8923. port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
  8924. if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
  8925. skylake_get_ddi_pll(dev_priv, port, pipe_config);
  8926. else if (IS_BROXTON(dev_priv))
  8927. bxt_get_ddi_pll(dev_priv, port, pipe_config);
  8928. else
  8929. haswell_get_ddi_pll(dev_priv, port, pipe_config);
  8930. pll = pipe_config->shared_dpll;
  8931. if (pll) {
  8932. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  8933. &pipe_config->dpll_hw_state));
  8934. }
  8935. /*
8936. * Haswell has only FDI/PCH transcoder A, which is connected to
  8937. * DDI E. So just check whether this pipe is wired to DDI E and whether
  8938. * the PCH transcoder is on.
  8939. */
  8940. if (INTEL_GEN(dev_priv) < 9 &&
  8941. (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
  8942. pipe_config->has_pch_encoder = true;
  8943. tmp = I915_READ(FDI_RX_CTL(PIPE_A));
  8944. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  8945. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  8946. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  8947. }
  8948. }
  8949. static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  8950. struct intel_crtc_state *pipe_config)
  8951. {
  8952. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  8953. enum intel_display_power_domain power_domain;
  8954. unsigned long power_domain_mask;
  8955. bool active;
  8956. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  8957. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8958. return false;
  8959. power_domain_mask = BIT(power_domain);
  8960. pipe_config->shared_dpll = NULL;
  8961. active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
  8962. if (IS_BROXTON(dev_priv) &&
  8963. bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
  8964. WARN_ON(active);
  8965. active = true;
  8966. }
  8967. if (!active)
  8968. goto out;
  8969. if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
  8970. haswell_get_ddi_port_state(crtc, pipe_config);
  8971. intel_get_pipe_timings(crtc, pipe_config);
  8972. }
  8973. intel_get_pipe_src_size(crtc, pipe_config);
  8974. pipe_config->gamma_mode =
  8975. I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
  8976. if (INTEL_GEN(dev_priv) >= 9) {
  8977. skl_init_scalers(dev_priv, crtc, pipe_config);
  8978. pipe_config->scaler_state.scaler_id = -1;
  8979. pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8980. }
  8981. power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
  8982. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  8983. power_domain_mask |= BIT(power_domain);
  8984. if (INTEL_GEN(dev_priv) >= 9)
  8985. skylake_get_pfit_config(crtc, pipe_config);
  8986. else
  8987. ironlake_get_pfit_config(crtc, pipe_config);
  8988. }
  8989. if (IS_HASWELL(dev_priv))
  8990. pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
  8991. (I915_READ(IPS_CTL) & IPS_ENABLE);
  8992. if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
  8993. !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
  8994. pipe_config->pixel_multiplier =
  8995. I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
  8996. } else {
  8997. pipe_config->pixel_multiplier = 1;
  8998. }
  8999. out:
  9000. for_each_power_domain(power_domain, power_domain_mask)
  9001. intel_display_power_put(dev_priv, power_domain);
  9002. return active;
  9003. }
  9004. static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
  9005. const struct intel_plane_state *plane_state)
  9006. {
  9007. struct drm_device *dev = crtc->dev;
  9008. struct drm_i915_private *dev_priv = to_i915(dev);
  9009. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9010. uint32_t cntl = 0, size = 0;
  9011. if (plane_state && plane_state->base.visible) {
  9012. unsigned int width = plane_state->base.crtc_w;
  9013. unsigned int height = plane_state->base.crtc_h;
  9014. unsigned int stride = roundup_pow_of_two(width) * 4;
  9015. switch (stride) {
  9016. default:
  9017. WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
  9018. width, stride);
  9019. stride = 256;
  9020. /* fallthrough */
  9021. case 256:
  9022. case 512:
  9023. case 1024:
  9024. case 2048:
  9025. break;
  9026. }
  9027. cntl |= CURSOR_ENABLE |
  9028. CURSOR_GAMMA_ENABLE |
  9029. CURSOR_FORMAT_ARGB |
  9030. CURSOR_STRIDE(stride);
  9031. size = (height << 12) | width;
  9032. }
  9033. if (intel_crtc->cursor_cntl != 0 &&
  9034. (intel_crtc->cursor_base != base ||
  9035. intel_crtc->cursor_size != size ||
  9036. intel_crtc->cursor_cntl != cntl)) {
  9037. /* On these chipsets we can only modify the base/size/stride
  9038. * whilst the cursor is disabled.
  9039. */
  9040. I915_WRITE(CURCNTR(PIPE_A), 0);
  9041. POSTING_READ(CURCNTR(PIPE_A));
  9042. intel_crtc->cursor_cntl = 0;
  9043. }
  9044. if (intel_crtc->cursor_base != base) {
  9045. I915_WRITE(CURBASE(PIPE_A), base);
  9046. intel_crtc->cursor_base = base;
  9047. }
  9048. if (intel_crtc->cursor_size != size) {
  9049. I915_WRITE(CURSIZE, size);
  9050. intel_crtc->cursor_size = size;
  9051. }
  9052. if (intel_crtc->cursor_cntl != cntl) {
  9053. I915_WRITE(CURCNTR(PIPE_A), cntl);
  9054. POSTING_READ(CURCNTR(PIPE_A));
  9055. intel_crtc->cursor_cntl = cntl;
  9056. }
  9057. }
  9058. static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
  9059. const struct intel_plane_state *plane_state)
  9060. {
  9061. struct drm_device *dev = crtc->dev;
  9062. struct drm_i915_private *dev_priv = to_i915(dev);
  9063. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9064. int pipe = intel_crtc->pipe;
  9065. uint32_t cntl = 0;
  9066. if (plane_state && plane_state->base.visible) {
  9067. cntl = MCURSOR_GAMMA_ENABLE;
  9068. switch (plane_state->base.crtc_w) {
  9069. case 64:
  9070. cntl |= CURSOR_MODE_64_ARGB_AX;
  9071. break;
  9072. case 128:
  9073. cntl |= CURSOR_MODE_128_ARGB_AX;
  9074. break;
  9075. case 256:
  9076. cntl |= CURSOR_MODE_256_ARGB_AX;
  9077. break;
  9078. default:
  9079. MISSING_CASE(plane_state->base.crtc_w);
  9080. return;
  9081. }
  9082. cntl |= pipe << 28; /* Connect to correct pipe */
  9083. if (HAS_DDI(dev_priv))
  9084. cntl |= CURSOR_PIPE_CSC_ENABLE;
  9085. if (plane_state->base.rotation & DRM_ROTATE_180)
  9086. cntl |= CURSOR_ROTATE_180;
  9087. }
  9088. if (intel_crtc->cursor_cntl != cntl) {
  9089. I915_WRITE(CURCNTR(pipe), cntl);
  9090. POSTING_READ(CURCNTR(pipe));
  9091. intel_crtc->cursor_cntl = cntl;
  9092. }
  9093. /* and commit changes on next vblank */
  9094. I915_WRITE(CURBASE(pipe), base);
  9095. POSTING_READ(CURBASE(pipe));
  9096. intel_crtc->cursor_base = base;
  9097. }
9098. /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
  9099. static void intel_crtc_update_cursor(struct drm_crtc *crtc,
  9100. const struct intel_plane_state *plane_state)
  9101. {
  9102. struct drm_device *dev = crtc->dev;
  9103. struct drm_i915_private *dev_priv = to_i915(dev);
  9104. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9105. int pipe = intel_crtc->pipe;
  9106. u32 base = intel_crtc->cursor_addr;
  9107. u32 pos = 0;
  9108. if (plane_state) {
  9109. int x = plane_state->base.crtc_x;
  9110. int y = plane_state->base.crtc_y;
  9111. if (x < 0) {
  9112. pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
  9113. x = -x;
  9114. }
  9115. pos |= x << CURSOR_X_SHIFT;
  9116. if (y < 0) {
  9117. pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
  9118. y = -y;
  9119. }
  9120. pos |= y << CURSOR_Y_SHIFT;
  9121. /* ILK+ do this automagically */
  9122. if (HAS_GMCH_DISPLAY(dev_priv) &&
  9123. plane_state->base.rotation & DRM_ROTATE_180) {
  9124. base += (plane_state->base.crtc_h *
  9125. plane_state->base.crtc_w - 1) * 4;
  9126. }
  9127. }
  9128. I915_WRITE(CURPOS(pipe), pos);
  9129. if (IS_845G(dev_priv) || IS_I865G(dev_priv))
  9130. i845_update_cursor(crtc, base, plane_state);
  9131. else
  9132. i9xx_update_cursor(crtc, base, plane_state);
  9133. }
  9134. static bool cursor_size_ok(struct drm_i915_private *dev_priv,
  9135. uint32_t width, uint32_t height)
  9136. {
  9137. if (width == 0 || height == 0)
  9138. return false;
  9139. /*
  9140. * 845g/865g are special in that they are only limited by
  9141. * the width of their cursors, the height is arbitrary up to
  9142. * the precision of the register. Everything else requires
  9143. * square cursors, limited to a few power-of-two sizes.
  9144. */
  9145. if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
  9146. if ((width & 63) != 0)
  9147. return false;
  9148. if (width > (IS_845G(dev_priv) ? 64 : 512))
  9149. return false;
  9150. if (height > 1023)
  9151. return false;
  9152. } else {
  9153. switch (width | height) {
  9154. case 256:
  9155. case 128:
  9156. if (IS_GEN2(dev_priv))
  9157. return false;
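/* fallthrough */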
  9158. case 64:
  9159. break;
  9160. default:
  9161. return false;
  9162. }
  9163. }
  9164. return true;
  9165. }
  9166. /* VESA 640x480x72Hz mode to set on the pipe */
  9167. static struct drm_display_mode load_detect_mode = {
  9168. DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
  9169. 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
  9170. };
  9171. struct drm_framebuffer *
  9172. __intel_framebuffer_create(struct drm_device *dev,
  9173. struct drm_mode_fb_cmd2 *mode_cmd,
  9174. struct drm_i915_gem_object *obj)
  9175. {
  9176. struct intel_framebuffer *intel_fb;
  9177. int ret;
  9178. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  9179. if (!intel_fb)
  9180. return ERR_PTR(-ENOMEM);
  9181. ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
  9182. if (ret)
  9183. goto err;
  9184. return &intel_fb->base;
  9185. err:
  9186. kfree(intel_fb);
  9187. return ERR_PTR(ret);
  9188. }
  9189. static struct drm_framebuffer *
  9190. intel_framebuffer_create(struct drm_device *dev,
  9191. struct drm_mode_fb_cmd2 *mode_cmd,
  9192. struct drm_i915_gem_object *obj)
  9193. {
  9194. struct drm_framebuffer *fb;
  9195. int ret;
  9196. ret = i915_mutex_lock_interruptible(dev);
  9197. if (ret)
  9198. return ERR_PTR(ret);
  9199. fb = __intel_framebuffer_create(dev, mode_cmd, obj);
  9200. mutex_unlock(&dev->struct_mutex);
  9201. return fb;
  9202. }
  9203. static u32
  9204. intel_framebuffer_pitch_for_width(int width, int bpp)
  9205. {
  9206. u32 pitch = DIV_ROUND_UP(width * bpp, 8);
  9207. return ALIGN(pitch, 64);
  9208. }
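/*
 * Illustrative sizing: a 640x480 mode at 32 bpp gives a pitch of
 * ALIGN(640 * 32 / 8, 64) = 2560 bytes and a buffer size of
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes.
 */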
  9209. static u32
  9210. intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
  9211. {
  9212. u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
  9213. return PAGE_ALIGN(pitch * mode->vdisplay);
  9214. }
  9215. static struct drm_framebuffer *
  9216. intel_framebuffer_create_for_mode(struct drm_device *dev,
  9217. struct drm_display_mode *mode,
  9218. int depth, int bpp)
  9219. {
  9220. struct drm_framebuffer *fb;
  9221. struct drm_i915_gem_object *obj;
  9222. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  9223. obj = i915_gem_object_create(dev,
  9224. intel_framebuffer_size_for_mode(mode, bpp));
  9225. if (IS_ERR(obj))
  9226. return ERR_CAST(obj);
  9227. mode_cmd.width = mode->hdisplay;
  9228. mode_cmd.height = mode->vdisplay;
  9229. mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
  9230. bpp);
  9231. mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
  9232. fb = intel_framebuffer_create(dev, &mode_cmd, obj);
  9233. if (IS_ERR(fb))
  9234. i915_gem_object_put(obj);
  9235. return fb;
  9236. }
  9237. static struct drm_framebuffer *
  9238. mode_fits_in_fbdev(struct drm_device *dev,
  9239. struct drm_display_mode *mode)
  9240. {
  9241. #ifdef CONFIG_DRM_FBDEV_EMULATION
  9242. struct drm_i915_private *dev_priv = to_i915(dev);
  9243. struct drm_i915_gem_object *obj;
  9244. struct drm_framebuffer *fb;
  9245. if (!dev_priv->fbdev)
  9246. return NULL;
  9247. if (!dev_priv->fbdev->fb)
  9248. return NULL;
  9249. obj = dev_priv->fbdev->fb->obj;
  9250. BUG_ON(!obj);
  9251. fb = &dev_priv->fbdev->fb->base;
  9252. if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
  9253. fb->bits_per_pixel))
  9254. return NULL;
  9255. if (obj->base.size < mode->vdisplay * fb->pitches[0])
  9256. return NULL;
  9257. drm_framebuffer_reference(fb);
  9258. return fb;
  9259. #else
  9260. return NULL;
  9261. #endif
  9262. }
  9263. static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
  9264. struct drm_crtc *crtc,
  9265. struct drm_display_mode *mode,
  9266. struct drm_framebuffer *fb,
  9267. int x, int y)
  9268. {
  9269. struct drm_plane_state *plane_state;
  9270. int hdisplay, vdisplay;
  9271. int ret;
  9272. plane_state = drm_atomic_get_plane_state(state, crtc->primary);
  9273. if (IS_ERR(plane_state))
  9274. return PTR_ERR(plane_state);
  9275. if (mode)
  9276. drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
  9277. else
  9278. hdisplay = vdisplay = 0;
  9279. ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
  9280. if (ret)
  9281. return ret;
  9282. drm_atomic_set_fb_for_plane(plane_state, fb);
  9283. plane_state->crtc_x = 0;
  9284. plane_state->crtc_y = 0;
  9285. plane_state->crtc_w = hdisplay;
  9286. plane_state->crtc_h = vdisplay;
  9287. plane_state->src_x = x << 16;
  9288. plane_state->src_y = y << 16;
  9289. plane_state->src_w = hdisplay << 16;
  9290. plane_state->src_h = vdisplay << 16;
  9291. return 0;
  9292. }
  9293. bool intel_get_load_detect_pipe(struct drm_connector *connector,
  9294. struct drm_display_mode *mode,
  9295. struct intel_load_detect_pipe *old,
  9296. struct drm_modeset_acquire_ctx *ctx)
  9297. {
  9298. struct intel_crtc *intel_crtc;
  9299. struct intel_encoder *intel_encoder =
  9300. intel_attached_encoder(connector);
  9301. struct drm_crtc *possible_crtc;
  9302. struct drm_encoder *encoder = &intel_encoder->base;
  9303. struct drm_crtc *crtc = NULL;
  9304. struct drm_device *dev = encoder->dev;
  9305. struct drm_i915_private *dev_priv = to_i915(dev);
  9306. struct drm_framebuffer *fb;
  9307. struct drm_mode_config *config = &dev->mode_config;
  9308. struct drm_atomic_state *state = NULL, *restore_state = NULL;
  9309. struct drm_connector_state *connector_state;
  9310. struct intel_crtc_state *crtc_state;
  9311. int ret, i = -1;
  9312. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  9313. connector->base.id, connector->name,
  9314. encoder->base.id, encoder->name);
  9315. old->restore_state = NULL;
  9316. retry:
  9317. ret = drm_modeset_lock(&config->connection_mutex, ctx);
  9318. if (ret)
  9319. goto fail;
  9320. /*
  9321. * Algorithm gets a little messy:
  9322. *
  9323. * - if the connector already has an assigned crtc, use it (but make
  9324. * sure it's on first)
  9325. *
  9326. * - try to find the first unused crtc that can drive this connector,
  9327. * and use that if we find one
  9328. */
  9329. /* See if we already have a CRTC for this connector */
  9330. if (connector->state->crtc) {
  9331. crtc = connector->state->crtc;
  9332. ret = drm_modeset_lock(&crtc->mutex, ctx);
  9333. if (ret)
  9334. goto fail;
  9335. /* Make sure the crtc and connector are running */
  9336. goto found;
  9337. }
  9338. /* Find an unused one (if possible) */
  9339. for_each_crtc(dev, possible_crtc) {
  9340. i++;
  9341. if (!(encoder->possible_crtcs & (1 << i)))
  9342. continue;
  9343. ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
  9344. if (ret)
  9345. goto fail;
  9346. if (possible_crtc->state->enable) {
  9347. drm_modeset_unlock(&possible_crtc->mutex);
  9348. continue;
  9349. }
  9350. crtc = possible_crtc;
  9351. break;
  9352. }
  9353. /*
  9354. * If we didn't find an unused CRTC, don't use any.
  9355. */
  9356. if (!crtc) {
  9357. DRM_DEBUG_KMS("no pipe available for load-detect\n");
  9358. goto fail;
  9359. }
  9360. found:
  9361. intel_crtc = to_intel_crtc(crtc);
  9362. ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
  9363. if (ret)
  9364. goto fail;
  9365. state = drm_atomic_state_alloc(dev);
  9366. restore_state = drm_atomic_state_alloc(dev);
  9367. if (!state || !restore_state) {
  9368. ret = -ENOMEM;
  9369. goto fail;
  9370. }
  9371. state->acquire_ctx = ctx;
  9372. restore_state->acquire_ctx = ctx;
  9373. connector_state = drm_atomic_get_connector_state(state, connector);
  9374. if (IS_ERR(connector_state)) {
  9375. ret = PTR_ERR(connector_state);
  9376. goto fail;
  9377. }
  9378. ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
  9379. if (ret)
  9380. goto fail;
  9381. crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
  9382. if (IS_ERR(crtc_state)) {
  9383. ret = PTR_ERR(crtc_state);
  9384. goto fail;
  9385. }
  9386. crtc_state->base.active = crtc_state->base.enable = true;
  9387. if (!mode)
  9388. mode = &load_detect_mode;
  9389. /* We need a framebuffer large enough to accommodate all accesses
  9390. * that the plane may generate whilst we perform load detection.
9391. * We cannot rely on the fbcon either being present (we get called
  9392. * during its initialisation to detect all boot displays, or it may
  9393. * not even exist) or that it is large enough to satisfy the
  9394. * requested mode.
  9395. */
  9396. fb = mode_fits_in_fbdev(dev, mode);
  9397. if (fb == NULL) {
  9398. DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
  9399. fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
  9400. } else
  9401. DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
  9402. if (IS_ERR(fb)) {
  9403. DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
  9404. goto fail;
  9405. }
  9406. ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
  9407. if (ret)
  9408. goto fail;
  9409. drm_framebuffer_unreference(fb);
  9410. ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
  9411. if (ret)
  9412. goto fail;
  9413. ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
  9414. if (!ret)
  9415. ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
  9416. if (!ret)
  9417. ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
  9418. if (ret) {
  9419. DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
  9420. goto fail;
  9421. }
  9422. ret = drm_atomic_commit(state);
  9423. if (ret) {
  9424. DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
  9425. goto fail;
  9426. }
  9427. old->restore_state = restore_state;
  9428. /* let the connector get through one full cycle before testing */
  9429. intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
  9430. return true;
  9431. fail:
  9432. if (state) {
  9433. drm_atomic_state_put(state);
  9434. state = NULL;
  9435. }
  9436. if (restore_state) {
  9437. drm_atomic_state_put(restore_state);
  9438. restore_state = NULL;
  9439. }
  9440. if (ret == -EDEADLK) {
  9441. drm_modeset_backoff(ctx);
  9442. goto retry;
  9443. }
  9444. return false;
  9445. }
  9446. void intel_release_load_detect_pipe(struct drm_connector *connector,
  9447. struct intel_load_detect_pipe *old,
  9448. struct drm_modeset_acquire_ctx *ctx)
  9449. {
  9450. struct intel_encoder *intel_encoder =
  9451. intel_attached_encoder(connector);
  9452. struct drm_encoder *encoder = &intel_encoder->base;
  9453. struct drm_atomic_state *state = old->restore_state;
  9454. int ret;
  9455. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  9456. connector->base.id, connector->name,
  9457. encoder->base.id, encoder->name);
  9458. if (!state)
  9459. return;
  9460. ret = drm_atomic_commit(state);
  9461. if (ret)
  9462. DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
  9463. drm_atomic_state_put(state);
  9464. }
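/*
 * Reference clock feeding the old-style DPLLs: the VBT SSC frequency when
 * the spread spectrum reference is selected, 120 MHz on PCH split platforms,
 * and 96 MHz (48 MHz on gen2) otherwise.
 */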
  9465. static int i9xx_pll_refclk(struct drm_device *dev,
  9466. const struct intel_crtc_state *pipe_config)
  9467. {
  9468. struct drm_i915_private *dev_priv = to_i915(dev);
  9469. u32 dpll = pipe_config->dpll_hw_state.dpll;
  9470. if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
  9471. return dev_priv->vbt.lvds_ssc_freq;
  9472. else if (HAS_PCH_SPLIT(dev_priv))
  9473. return 120000;
  9474. else if (!IS_GEN2(dev_priv))
  9475. return 96000;
  9476. else
  9477. return 48000;
  9478. }
  9479. /* Returns the clock of the currently programmed mode of the given pipe. */
  9480. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  9481. struct intel_crtc_state *pipe_config)
  9482. {
  9483. struct drm_device *dev = crtc->base.dev;
  9484. struct drm_i915_private *dev_priv = to_i915(dev);
  9485. int pipe = pipe_config->cpu_transcoder;
  9486. u32 dpll = pipe_config->dpll_hw_state.dpll;
  9487. u32 fp;
  9488. struct dpll clock;
  9489. int port_clock;
  9490. int refclk = i9xx_pll_refclk(dev, pipe_config);
  9491. if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
  9492. fp = pipe_config->dpll_hw_state.fp0;
  9493. else
  9494. fp = pipe_config->dpll_hw_state.fp1;
  9495. clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
  9496. if (IS_PINEVIEW(dev_priv)) {
  9497. clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
  9498. clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
  9499. } else {
  9500. clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
  9501. clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
  9502. }
  9503. if (!IS_GEN2(dev_priv)) {
  9504. if (IS_PINEVIEW(dev_priv))
  9505. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
  9506. DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
  9507. else
  9508. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
  9509. DPLL_FPA01_P1_POST_DIV_SHIFT);
  9510. switch (dpll & DPLL_MODE_MASK) {
  9511. case DPLLB_MODE_DAC_SERIAL:
  9512. clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
  9513. 5 : 10;
  9514. break;
  9515. case DPLLB_MODE_LVDS:
  9516. clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
  9517. 7 : 14;
  9518. break;
  9519. default:
  9520. DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
  9521. "mode\n", (int)(dpll & DPLL_MODE_MASK));
  9522. return;
  9523. }
  9524. if (IS_PINEVIEW(dev_priv))
  9525. port_clock = pnv_calc_dpll_params(refclk, &clock);
  9526. else
  9527. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  9528. } else {
  9529. u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
  9530. bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
  9531. if (is_lvds) {
  9532. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
  9533. DPLL_FPA01_P1_POST_DIV_SHIFT);
  9534. if (lvds & LVDS_CLKB_POWER_UP)
  9535. clock.p2 = 7;
  9536. else
  9537. clock.p2 = 14;
  9538. } else {
  9539. if (dpll & PLL_P1_DIVIDE_BY_TWO)
  9540. clock.p1 = 2;
  9541. else {
  9542. clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
  9543. DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
  9544. }
  9545. if (dpll & PLL_P2_DIVIDE_BY_4)
  9546. clock.p2 = 4;
  9547. else
  9548. clock.p2 = 2;
  9549. }
  9550. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  9551. }
  9552. /*
  9553. * This value includes pixel_multiplier. We will use
  9554. * port_clock to compute adjusted_mode.crtc_clock in the
  9555. * encoder's get_config() function.
  9556. */
  9557. pipe_config->port_clock = port_clock;
  9558. }
  9559. int intel_dotclock_calculate(int link_freq,
  9560. const struct intel_link_m_n *m_n)
  9561. {
  9562. /*
  9563. * The calculation for the data clock is:
  9564. * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
9565. * But we want to avoid losing precision if possible, so:
  9566. * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
  9567. *
9568. * and the dot clock derived from the link M/N values is simpler:
9569. * dot_clock = (link_m * link_clock) / link_n
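* Illustrative numbers: link_m = 107000, link_n = 524288 and a 270000 kHz
* link clock give 107000 * 270000 / 524288 ~= 55103 kHz.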
  9570. */
  9571. if (!m_n->link_n)
  9572. return 0;
  9573. return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
  9574. }
  9575. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  9576. struct intel_crtc_state *pipe_config)
  9577. {
  9578. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  9579. /* read out port_clock from the DPLL */
  9580. i9xx_crtc_clock_get(crtc, pipe_config);
  9581. /*
  9582. * In case there is an active pipe without active ports,
  9583. * we may need some idea for the dotclock anyway.
  9584. * Calculate one based on the FDI configuration.
  9585. */
  9586. pipe_config->base.adjusted_mode.crtc_clock =
  9587. intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  9588. &pipe_config->fdi_m_n);
  9589. }
  9590. /** Returns the currently programmed mode of the given pipe. */
  9591. struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
  9592. struct drm_crtc *crtc)
  9593. {
  9594. struct drm_i915_private *dev_priv = to_i915(dev);
  9595. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9596. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  9597. struct drm_display_mode *mode;
  9598. struct intel_crtc_state *pipe_config;
  9599. int htot = I915_READ(HTOTAL(cpu_transcoder));
  9600. int hsync = I915_READ(HSYNC(cpu_transcoder));
  9601. int vtot = I915_READ(VTOTAL(cpu_transcoder));
  9602. int vsync = I915_READ(VSYNC(cpu_transcoder));
  9603. enum pipe pipe = intel_crtc->pipe;
  9604. mode = kzalloc(sizeof(*mode), GFP_KERNEL);
  9605. if (!mode)
  9606. return NULL;
  9607. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  9608. if (!pipe_config) {
  9609. kfree(mode);
  9610. return NULL;
  9611. }
  9612. /*
  9613. * Construct a pipe_config sufficient for getting the clock info
  9614. * back out of crtc_clock_get.
  9615. *
  9616. * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
  9617. * to use a real value here instead.
  9618. */
  9619. pipe_config->cpu_transcoder = (enum transcoder) pipe;
  9620. pipe_config->pixel_multiplier = 1;
  9621. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
  9622. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
  9623. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
  9624. i9xx_crtc_clock_get(intel_crtc, pipe_config);
  9625. mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
  9626. mode->hdisplay = (htot & 0xffff) + 1;
  9627. mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
  9628. mode->hsync_start = (hsync & 0xffff) + 1;
  9629. mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
  9630. mode->vdisplay = (vtot & 0xffff) + 1;
  9631. mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
  9632. mode->vsync_start = (vsync & 0xffff) + 1;
  9633. mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
  9634. drm_mode_set_name(mode);
  9635. kfree(pipe_config);
  9636. return mode;
  9637. }
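/* Tear down the CRTC: cancel any outstanding flip work before freeing it. */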
  9638. static void intel_crtc_destroy(struct drm_crtc *crtc)
  9639. {
  9640. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9641. struct drm_device *dev = crtc->dev;
  9642. struct intel_flip_work *work;
  9643. spin_lock_irq(&dev->event_lock);
  9644. work = intel_crtc->flip_work;
  9645. intel_crtc->flip_work = NULL;
  9646. spin_unlock_irq(&dev->event_lock);
  9647. if (work) {
  9648. cancel_work_sync(&work->mmio_work);
  9649. cancel_work_sync(&work->unpin_work);
  9650. kfree(work);
  9651. }
  9652. drm_crtc_cleanup(crtc);
  9653. kfree(intel_crtc);
  9654. }
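/* Deferred flip completion: unpin the old fb and drop the references taken when the flip was queued. */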
  9655. static void intel_unpin_work_fn(struct work_struct *__work)
  9656. {
  9657. struct intel_flip_work *work =
  9658. container_of(__work, struct intel_flip_work, unpin_work);
  9659. struct intel_crtc *crtc = to_intel_crtc(work->crtc);
  9660. struct drm_device *dev = crtc->base.dev;
  9661. struct drm_plane *primary = crtc->base.primary;
  9662. if (is_mmio_work(work))
  9663. flush_work(&work->mmio_work);
  9664. mutex_lock(&dev->struct_mutex);
  9665. intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
  9666. i915_gem_object_put(work->pending_flip_obj);
  9667. mutex_unlock(&dev->struct_mutex);
  9668. i915_gem_request_put(work->flip_queued_req);
  9669. intel_frontbuffer_flip_complete(to_i915(dev),
  9670. to_intel_plane(primary)->frontbuffer_bit);
  9671. intel_fbc_post_update(crtc);
  9672. drm_framebuffer_unreference(work->old_fb);
  9673. BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
  9674. atomic_dec(&crtc->unpin_work_count);
  9675. kfree(work);
  9676. }
  9677. /* Is 'a' after or equal to 'b'? */
  9678. static bool g4x_flip_count_after_eq(u32 a, u32 b)
  9679. {
  9680. return !((a - b) & 0x80000000);
  9681. }
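/* Has a CS (ring-emitted) flip completed on this crtc? */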
  9682. static bool __pageflip_finished_cs(struct intel_crtc *crtc,
  9683. struct intel_flip_work *work)
  9684. {
  9685. struct drm_device *dev = crtc->base.dev;
  9686. struct drm_i915_private *dev_priv = to_i915(dev);
  9687. if (abort_flip_on_reset(crtc))
  9688. return true;
  9689. /*
9690. * The relevant registers don't exist on pre-ctg.
  9691. * As the flip done interrupt doesn't trigger for mmio
  9692. * flips on gmch platforms, a flip count check isn't
  9693. * really needed there. But since ctg has the registers,
  9694. * include it in the check anyway.
  9695. */
  9696. if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
  9697. return true;
  9698. /*
  9699. * BDW signals flip done immediately if the plane
  9700. * is disabled, even if the plane enable is already
  9701. * armed to occur at the next vblank :(
  9702. */
  9703. /*
  9704. * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
  9705. * used the same base address. In that case the mmio flip might
  9706. * have completed, but the CS hasn't even executed the flip yet.
  9707. *
  9708. * A flip count check isn't enough as the CS might have updated
  9709. * the base address just after start of vblank, but before we
  9710. * managed to process the interrupt. This means we'd complete the
  9711. * CS flip too soon.
  9712. *
  9713. * Combining both checks should get us a good enough result. It may
  9714. * still happen that the CS flip has been executed, but has not
  9715. * yet actually completed. But in case the base address is the same
  9716. * anyway, we don't really care.
  9717. */
  9718. return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
  9719. crtc->flip_work->gtt_offset &&
  9720. g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
  9721. crtc->flip_work->flip_count);
  9722. }
  9723. static bool
  9724. __pageflip_finished_mmio(struct intel_crtc *crtc,
  9725. struct intel_flip_work *work)
  9726. {
  9727. /*
  9728. * MMIO work completes when vblank is different from
  9729. * flip_queued_vblank.
  9730. *
  9731. * Reset counter value doesn't matter, this is handled by
  9732. * i915_wait_request finishing early, so no need to handle
  9733. * reset here.
  9734. */
  9735. return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
  9736. }
  9737. static bool pageflip_finished(struct intel_crtc *crtc,
  9738. struct intel_flip_work *work)
  9739. {
  9740. if (!atomic_read(&work->pending))
  9741. return false;
  9742. smp_rmb();
  9743. if (is_mmio_work(work))
  9744. return __pageflip_finished_mmio(crtc, work);
  9745. else
  9746. return __pageflip_finished_cs(crtc, work);
  9747. }
  9748. void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
  9749. {
  9750. struct drm_device *dev = &dev_priv->drm;
  9751. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  9752. struct intel_flip_work *work;
  9753. unsigned long flags;
  9754. /* Ignore early vblank irqs */
  9755. if (!crtc)
  9756. return;
  9757. /*
  9758. * This is called both by irq handlers and the reset code (to complete
  9759. * lost pageflips) so needs the full irqsave spinlocks.
  9760. */
  9761. spin_lock_irqsave(&dev->event_lock, flags);
  9762. work = crtc->flip_work;
  9763. if (work != NULL &&
  9764. !is_mmio_work(work) &&
  9765. pageflip_finished(crtc, work))
  9766. page_flip_completed(crtc);
  9767. spin_unlock_irqrestore(&dev->event_lock, flags);
  9768. }
  9769. void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
  9770. {
  9771. struct drm_device *dev = &dev_priv->drm;
  9772. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  9773. struct intel_flip_work *work;
  9774. unsigned long flags;
  9775. /* Ignore early vblank irqs */
  9776. if (!crtc)
  9777. return;
  9778. /*
  9779. * This is called both by irq handlers and the reset code (to complete
  9780. * lost pageflips) so needs the full irqsave spinlocks.
  9781. */
  9782. spin_lock_irqsave(&dev->event_lock, flags);
  9783. work = crtc->flip_work;
  9784. if (work != NULL &&
  9785. is_mmio_work(work) &&
  9786. pageflip_finished(crtc, work))
  9787. page_flip_completed(crtc);
  9788. spin_unlock_irqrestore(&dev->event_lock, flags);
  9789. }
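/* Publish a queued flip; the barrier pairs with the smp_rmb() after the atomic_read() of work->pending. */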
  9790. static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
  9791. struct intel_flip_work *work)
  9792. {
  9793. work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
  9794. /* Ensure that the work item is consistent when activating it ... */
  9795. smp_mb__before_atomic();
  9796. atomic_set(&work->pending, 1);
  9797. }
  9798. static int intel_gen2_queue_flip(struct drm_device *dev,
  9799. struct drm_crtc *crtc,
  9800. struct drm_framebuffer *fb,
  9801. struct drm_i915_gem_object *obj,
  9802. struct drm_i915_gem_request *req,
  9803. uint32_t flags)
  9804. {
  9805. struct intel_ring *ring = req->ring;
  9806. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9807. u32 flip_mask;
  9808. int ret;
  9809. ret = intel_ring_begin(req, 6);
  9810. if (ret)
  9811. return ret;
  9812. /* Can't queue multiple flips, so wait for the previous
  9813. * one to finish before executing the next.
  9814. */
  9815. if (intel_crtc->plane)
  9816. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9817. else
  9818. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9819. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  9820. intel_ring_emit(ring, MI_NOOP);
  9821. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9822. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9823. intel_ring_emit(ring, fb->pitches[0]);
  9824. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9825. intel_ring_emit(ring, 0); /* aux display base address, unused */
  9826. return 0;
  9827. }
  9828. static int intel_gen3_queue_flip(struct drm_device *dev,
  9829. struct drm_crtc *crtc,
  9830. struct drm_framebuffer *fb,
  9831. struct drm_i915_gem_object *obj,
  9832. struct drm_i915_gem_request *req,
  9833. uint32_t flags)
  9834. {
  9835. struct intel_ring *ring = req->ring;
  9836. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9837. u32 flip_mask;
  9838. int ret;
  9839. ret = intel_ring_begin(req, 6);
  9840. if (ret)
  9841. return ret;
  9842. if (intel_crtc->plane)
  9843. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9844. else
  9845. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9846. intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
  9847. intel_ring_emit(ring, MI_NOOP);
  9848. intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
  9849. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9850. intel_ring_emit(ring, fb->pitches[0]);
  9851. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9852. intel_ring_emit(ring, MI_NOOP);
  9853. return 0;
  9854. }
  9855. static int intel_gen4_queue_flip(struct drm_device *dev,
  9856. struct drm_crtc *crtc,
  9857. struct drm_framebuffer *fb,
  9858. struct drm_i915_gem_object *obj,
  9859. struct drm_i915_gem_request *req,
  9860. uint32_t flags)
  9861. {
  9862. struct intel_ring *ring = req->ring;
  9863. struct drm_i915_private *dev_priv = to_i915(dev);
  9864. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9865. uint32_t pf, pipesrc;
  9866. int ret;
  9867. ret = intel_ring_begin(req, 4);
  9868. if (ret)
  9869. return ret;
  9870. /* i965+ uses the linear or tiled offsets from the
  9871. * Display Registers (which do not change across a page-flip)
  9872. * so we need only reprogram the base address.
  9873. */
  9874. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9875. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9876. intel_ring_emit(ring, fb->pitches[0]);
  9877. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
  9878. intel_fb_modifier_to_tiling(fb->modifier[0]));
  9879. /* XXX Enabling the panel-fitter across page-flip is so far
  9880. * untested on non-native modes, so ignore it for now.
  9881. * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
  9882. */
  9883. pf = 0;
  9884. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9885. intel_ring_emit(ring, pf | pipesrc);
  9886. return 0;
  9887. }
  9888. static int intel_gen6_queue_flip(struct drm_device *dev,
  9889. struct drm_crtc *crtc,
  9890. struct drm_framebuffer *fb,
  9891. struct drm_i915_gem_object *obj,
  9892. struct drm_i915_gem_request *req,
  9893. uint32_t flags)
  9894. {
  9895. struct intel_ring *ring = req->ring;
  9896. struct drm_i915_private *dev_priv = to_i915(dev);
  9897. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9898. uint32_t pf, pipesrc;
  9899. int ret;
  9900. ret = intel_ring_begin(req, 4);
  9901. if (ret)
  9902. return ret;
  9903. intel_ring_emit(ring, MI_DISPLAY_FLIP |
  9904. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9905. intel_ring_emit(ring, fb->pitches[0] |
  9906. intel_fb_modifier_to_tiling(fb->modifier[0]));
  9907. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  9908. /* Contrary to the suggestions in the documentation,
  9909. * "Enable Panel Fitter" does not seem to be required when page
9910. * flipping with a non-native mode, and, worse, causes a normal
  9911. * modeset to fail.
  9912. * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
  9913. */
  9914. pf = 0;
  9915. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9916. intel_ring_emit(ring, pf | pipesrc);
  9917. return 0;
  9918. }
  9919. static int intel_gen7_queue_flip(struct drm_device *dev,
  9920. struct drm_crtc *crtc,
  9921. struct drm_framebuffer *fb,
  9922. struct drm_i915_gem_object *obj,
  9923. struct drm_i915_gem_request *req,
  9924. uint32_t flags)
  9925. {
  9926. struct drm_i915_private *dev_priv = to_i915(dev);
  9927. struct intel_ring *ring = req->ring;
  9928. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9929. uint32_t plane_bit = 0;
  9930. int len, ret;
  9931. switch (intel_crtc->plane) {
  9932. case PLANE_A:
  9933. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
  9934. break;
  9935. case PLANE_B:
  9936. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
  9937. break;
  9938. case PLANE_C:
  9939. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
  9940. break;
  9941. default:
  9942. WARN_ONCE(1, "unknown plane in flip command\n");
  9943. return -ENODEV;
  9944. }
  9945. len = 4;
  9946. if (req->engine->id == RCS) {
  9947. len += 6;
  9948. /*
  9949. * On Gen 8, SRM is now taking an extra dword to accommodate
  9950. * 48bits addresses, and we need a NOOP for the batch size to
  9951. * stay even.
  9952. */
  9953. if (IS_GEN8(dev_priv))
  9954. len += 2;
  9955. }
  9956. /*
  9957. * BSpec MI_DISPLAY_FLIP for IVB:
  9958. * "The full packet must be contained within the same cache line."
  9959. *
  9960. * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
  9961. * cacheline, if we ever start emitting more commands before
  9962. * the MI_DISPLAY_FLIP we may need to first emit everything else,
  9963. * then do the cacheline alignment, and finally emit the
  9964. * MI_DISPLAY_FLIP.
  9965. */
  9966. ret = intel_ring_cacheline_align(req);
  9967. if (ret)
  9968. return ret;
  9969. ret = intel_ring_begin(req, len);
  9970. if (ret)
  9971. return ret;
  9972. /* Unmask the flip-done completion message. Note that the bspec says that
  9973. * we should do this for both the BCS and RCS, and that we must not unmask
  9974. * more than one flip event at any time (or ensure that one flip message
  9975. * can be sent by waiting for flip-done prior to queueing new flips).
  9976. * Experimentation says that BCS works despite DERRMR masking all
  9977. * flip-done completion events and that unmasking all planes at once
  9978. * for the RCS also doesn't appear to drop events. Setting the DERRMR
  9979. * to zero does lead to lockups within MI_DISPLAY_FLIP.
  9980. */
  9981. if (req->engine->id == RCS) {
  9982. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  9983. intel_ring_emit_reg(ring, DERRMR);
  9984. intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
  9985. DERRMR_PIPEB_PRI_FLIP_DONE |
  9986. DERRMR_PIPEC_PRI_FLIP_DONE));
  9987. if (IS_GEN8(dev_priv))
  9988. intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
  9989. MI_SRM_LRM_GLOBAL_GTT);
  9990. else
  9991. intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
  9992. MI_SRM_LRM_GLOBAL_GTT);
  9993. intel_ring_emit_reg(ring, DERRMR);
  9994. intel_ring_emit(ring,
  9995. i915_ggtt_offset(req->engine->scratch) + 256);
  9996. if (IS_GEN8(dev_priv)) {
  9997. intel_ring_emit(ring, 0);
  9998. intel_ring_emit(ring, MI_NOOP);
  9999. }
  10000. }
  10001. intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
  10002. intel_ring_emit(ring, fb->pitches[0] |
  10003. intel_fb_modifier_to_tiling(fb->modifier[0]));
  10004. intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
  10005. intel_ring_emit(ring, (MI_NOOP));
  10006. return 0;
  10007. }
  10008. static bool use_mmio_flip(struct intel_engine_cs *engine,
  10009. struct drm_i915_gem_object *obj)
  10010. {
  10011. /*
  10012. * This is not being used for older platforms, because
  10013. * non-availability of flip done interrupt forces us to use
  10014. * CS flips. Older platforms derive flip done using some clever
  10015. * tricks involving the flip_pending status bits and vblank irqs.
  10016. * So using MMIO flips there would disrupt this mechanism.
  10017. */
  10018. if (engine == NULL)
  10019. return true;
  10020. if (INTEL_GEN(engine->i915) < 5)
  10021. return false;
  10022. if (i915.use_mmio_flip < 0)
  10023. return false;
  10024. else if (i915.use_mmio_flip > 0)
  10025. return true;
  10026. else if (i915.enable_execlists)
  10027. return true;
  10028. return engine != i915_gem_object_last_write_engine(obj);
  10029. }
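/* MMIO flip for SKL+: reprogram tiling and stride, then arm the flip with a PLANE_SURF write. */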
  10030. static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
  10031. unsigned int rotation,
  10032. struct intel_flip_work *work)
  10033. {
  10034. struct drm_device *dev = intel_crtc->base.dev;
  10035. struct drm_i915_private *dev_priv = to_i915(dev);
  10036. struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
  10037. const enum pipe pipe = intel_crtc->pipe;
  10038. u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
  10039. ctl = I915_READ(PLANE_CTL(pipe, 0));
  10040. ctl &= ~PLANE_CTL_TILED_MASK;
  10041. switch (fb->modifier[0]) {
  10042. case DRM_FORMAT_MOD_NONE:
  10043. break;
  10044. case I915_FORMAT_MOD_X_TILED:
  10045. ctl |= PLANE_CTL_TILED_X;
  10046. break;
  10047. case I915_FORMAT_MOD_Y_TILED:
  10048. ctl |= PLANE_CTL_TILED_Y;
  10049. break;
  10050. case I915_FORMAT_MOD_Yf_TILED:
  10051. ctl |= PLANE_CTL_TILED_YF;
  10052. break;
  10053. default:
  10054. MISSING_CASE(fb->modifier[0]);
  10055. }
  10056. /*
10057. * PLANE_CTL and PLANE_STRIDE take effect not on vblank but on the
10058. * PLANE_SURF update, so writing PLANE_SURF last makes the whole update atomic.
  10059. */
  10060. I915_WRITE(PLANE_CTL(pipe, 0), ctl);
  10061. I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
  10062. I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
  10063. POSTING_READ(PLANE_SURF(pipe, 0));
  10064. }
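/* MMIO flip for ILK-style primary planes: update DSPCNTR tiling, then latch the new surface via DSPSURF. */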
  10065. static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
  10066. struct intel_flip_work *work)
  10067. {
  10068. struct drm_device *dev = intel_crtc->base.dev;
  10069. struct drm_i915_private *dev_priv = to_i915(dev);
  10070. struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
  10071. i915_reg_t reg = DSPCNTR(intel_crtc->plane);
  10072. u32 dspcntr;
  10073. dspcntr = I915_READ(reg);
  10074. if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
  10075. dspcntr |= DISPPLANE_TILED;
  10076. else
  10077. dspcntr &= ~DISPPLANE_TILED;
  10078. I915_WRITE(reg, dspcntr);
  10079. I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
  10080. POSTING_READ(DSPSURF(intel_crtc->plane));
  10081. }
  10082. static void intel_mmio_flip_work_func(struct work_struct *w)
  10083. {
  10084. struct intel_flip_work *work =
  10085. container_of(w, struct intel_flip_work, mmio_work);
  10086. struct intel_crtc *crtc = to_intel_crtc(work->crtc);
  10087. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  10088. struct intel_framebuffer *intel_fb =
  10089. to_intel_framebuffer(crtc->base.primary->fb);
  10090. struct drm_i915_gem_object *obj = intel_fb->obj;
  10091. i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
  10092. WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
  10093. intel_pipe_update_start(crtc);
  10094. if (INTEL_GEN(dev_priv) >= 9)
  10095. skl_do_mmio_flip(crtc, work->rotation, work);
  10096. else
10097. /* use_mmio_flip() restricts MMIO flips to ilk+ */
  10098. ilk_do_mmio_flip(crtc, work);
  10099. intel_pipe_update_end(crtc, work);
  10100. }
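/* Fallback for platforms without CS flip support; always takes the -ENODEV path. */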
  10101. static int intel_default_queue_flip(struct drm_device *dev,
  10102. struct drm_crtc *crtc,
  10103. struct drm_framebuffer *fb,
  10104. struct drm_i915_gem_object *obj,
  10105. struct drm_i915_gem_request *req,
  10106. uint32_t flags)
  10107. {
  10108. return -ENODEV;
  10109. }
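/*
 * Heuristic for a stuck CS flip: if several vblanks have passed and the
 * display base already points at the new fb, assume the flip-done
 * interrupt was missed.
 */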
  10110. static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
  10111. struct intel_crtc *intel_crtc,
  10112. struct intel_flip_work *work)
  10113. {
  10114. u32 addr, vblank;
  10115. if (!atomic_read(&work->pending))
  10116. return false;
  10117. smp_rmb();
  10118. vblank = intel_crtc_get_vblank_counter(intel_crtc);
  10119. if (work->flip_ready_vblank == 0) {
  10120. if (work->flip_queued_req &&
  10121. !i915_gem_request_completed(work->flip_queued_req))
  10122. return false;
  10123. work->flip_ready_vblank = vblank;
  10124. }
  10125. if (vblank - work->flip_ready_vblank < 3)
  10126. return false;
  10127. /* Potential stall - if we see that the flip has happened,
  10128. * assume a missed interrupt. */
  10129. if (INTEL_GEN(dev_priv) >= 4)
  10130. addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
  10131. else
  10132. addr = I915_READ(DSPADDR(intel_crtc->plane));
  10133. /* There is a potential issue here with a false positive after a flip
  10134. * to the same address. We could address this by checking for a
  10135. * non-incrementing frame counter.
  10136. */
  10137. return addr == work->gtt_offset;
  10138. }
  10139. void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
  10140. {
  10141. struct drm_device *dev = &dev_priv->drm;
  10142. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  10143. struct intel_flip_work *work;
  10144. WARN_ON(!in_interrupt());
  10145. if (crtc == NULL)
  10146. return;
  10147. spin_lock(&dev->event_lock);
  10148. work = crtc->flip_work;
  10149. if (work != NULL && !is_mmio_work(work) &&
  10150. __pageflip_stall_check_cs(dev_priv, crtc, work)) {
  10151. WARN_ONCE(1,
  10152. "Kicking stuck page flip: queued at %d, now %d\n",
  10153. work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
  10154. page_flip_completed(crtc);
  10155. work = NULL;
  10156. }
  10157. if (work != NULL && !is_mmio_work(work) &&
  10158. intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
  10159. intel_queue_rps_boost_for_request(work->flip_queued_req);
  10160. spin_unlock(&dev->event_lock);
  10161. }
  10162. static int intel_crtc_page_flip(struct drm_crtc *crtc,
  10163. struct drm_framebuffer *fb,
  10164. struct drm_pending_vblank_event *event,
  10165. uint32_t page_flip_flags)
  10166. {
  10167. struct drm_device *dev = crtc->dev;
  10168. struct drm_i915_private *dev_priv = to_i915(dev);
  10169. struct drm_framebuffer *old_fb = crtc->primary->fb;
  10170. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  10171. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10172. struct drm_plane *primary = crtc->primary;
  10173. enum pipe pipe = intel_crtc->pipe;
  10174. struct intel_flip_work *work;
  10175. struct intel_engine_cs *engine;
  10176. bool mmio_flip;
  10177. struct drm_i915_gem_request *request;
  10178. struct i915_vma *vma;
  10179. int ret;
  10180. /*
  10181. * drm_mode_page_flip_ioctl() should already catch this, but double
  10182. * check to be safe. In the future we may enable pageflipping from
  10183. * a disabled primary plane.
  10184. */
  10185. if (WARN_ON(intel_fb_obj(old_fb) == NULL))
  10186. return -EBUSY;
  10187. /* Can't change pixel format via MI display flips. */
  10188. if (fb->pixel_format != crtc->primary->fb->pixel_format)
  10189. return -EINVAL;
  10190. /*
  10191. * TILEOFF/LINOFF registers can't be changed via MI display flips.
10192. * Note that pitch changes could also affect these registers.
  10193. */
  10194. if (INTEL_GEN(dev_priv) > 3 &&
  10195. (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
  10196. fb->pitches[0] != crtc->primary->fb->pitches[0]))
  10197. return -EINVAL;
  10198. if (i915_terminally_wedged(&dev_priv->gpu_error))
  10199. goto out_hang;
  10200. work = kzalloc(sizeof(*work), GFP_KERNEL);
  10201. if (work == NULL)
  10202. return -ENOMEM;
  10203. work->event = event;
  10204. work->crtc = crtc;
  10205. work->old_fb = old_fb;
  10206. INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
  10207. ret = drm_crtc_vblank_get(crtc);
  10208. if (ret)
  10209. goto free_work;
  10210. /* We borrow the event spin lock for protecting flip_work */
  10211. spin_lock_irq(&dev->event_lock);
  10212. if (intel_crtc->flip_work) {
  10213. /* Before declaring the flip queue wedged, check if
  10214. * the hardware completed the operation behind our backs.
  10215. */
  10216. if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
  10217. DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
  10218. page_flip_completed(intel_crtc);
  10219. } else {
  10220. DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
  10221. spin_unlock_irq(&dev->event_lock);
  10222. drm_crtc_vblank_put(crtc);
  10223. kfree(work);
  10224. return -EBUSY;
  10225. }
  10226. }
  10227. intel_crtc->flip_work = work;
  10228. spin_unlock_irq(&dev->event_lock);
  10229. if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
  10230. flush_workqueue(dev_priv->wq);
  10231. /* Reference the objects for the scheduled work. */
  10232. drm_framebuffer_reference(work->old_fb);
  10233. crtc->primary->fb = fb;
  10234. update_state_fb(crtc->primary);
  10235. work->pending_flip_obj = i915_gem_object_get(obj);
  10236. ret = i915_mutex_lock_interruptible(dev);
  10237. if (ret)
  10238. goto cleanup;
  10239. intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
  10240. if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
  10241. ret = -EIO;
  10242. goto cleanup;
  10243. }
  10244. atomic_inc(&intel_crtc->unpin_work_count);
  10245. if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
  10246. work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
  10247. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
  10248. engine = dev_priv->engine[BCS];
  10249. if (fb->modifier[0] != old_fb->modifier[0])
  10250. /* vlv: DISPLAY_FLIP fails to change tiling */
  10251. engine = NULL;
  10252. } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
  10253. engine = dev_priv->engine[BCS];
  10254. } else if (INTEL_GEN(dev_priv) >= 7) {
  10255. engine = i915_gem_object_last_write_engine(obj);
  10256. if (engine == NULL || engine->id != RCS)
  10257. engine = dev_priv->engine[BCS];
  10258. } else {
  10259. engine = dev_priv->engine[RCS];
  10260. }
  10261. mmio_flip = use_mmio_flip(engine, obj);
  10262. vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
  10263. if (IS_ERR(vma)) {
  10264. ret = PTR_ERR(vma);
  10265. goto cleanup_pending;
  10266. }
  10267. work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
  10268. work->gtt_offset += intel_crtc->dspaddr_offset;
  10269. work->rotation = crtc->primary->state->rotation;
  10270. /*
  10271. * There's the potential that the next frame will not be compatible with
  10272. * FBC, so we want to call pre_update() before the actual page flip.
  10273. * The problem is that pre_update() caches some information about the fb
  10274. * object, so we want to do this only after the object is pinned. Let's
  10275. * be on the safe side and do this immediately before scheduling the
  10276. * flip.
  10277. */
  10278. intel_fbc_pre_update(intel_crtc, intel_crtc->config,
  10279. to_intel_plane_state(primary->state));
  10280. if (mmio_flip) {
  10281. INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
  10282. queue_work(system_unbound_wq, &work->mmio_work);
  10283. } else {
  10284. request = i915_gem_request_alloc(engine, engine->last_context);
  10285. if (IS_ERR(request)) {
  10286. ret = PTR_ERR(request);
  10287. goto cleanup_unpin;
  10288. }
  10289. ret = i915_gem_request_await_object(request, obj, false);
  10290. if (ret)
  10291. goto cleanup_request;
  10292. ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
  10293. page_flip_flags);
  10294. if (ret)
  10295. goto cleanup_request;
  10296. intel_mark_page_flip_active(intel_crtc, work);
  10297. work->flip_queued_req = i915_gem_request_get(request);
  10298. i915_add_request_no_flush(request);
  10299. }
  10300. i915_gem_track_fb(intel_fb_obj(old_fb), obj,
  10301. to_intel_plane(primary)->frontbuffer_bit);
  10302. mutex_unlock(&dev->struct_mutex);
  10303. intel_frontbuffer_flip_prepare(to_i915(dev),
  10304. to_intel_plane(primary)->frontbuffer_bit);
  10305. trace_i915_flip_request(intel_crtc->plane, obj);
  10306. return 0;
  10307. cleanup_request:
  10308. i915_add_request_no_flush(request);
  10309. cleanup_unpin:
  10310. intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
  10311. cleanup_pending:
  10312. atomic_dec(&intel_crtc->unpin_work_count);
  10313. mutex_unlock(&dev->struct_mutex);
  10314. cleanup:
  10315. crtc->primary->fb = old_fb;
  10316. update_state_fb(crtc->primary);
  10317. i915_gem_object_put(obj);
  10318. drm_framebuffer_unreference(work->old_fb);
  10319. spin_lock_irq(&dev->event_lock);
  10320. intel_crtc->flip_work = NULL;
  10321. spin_unlock_irq(&dev->event_lock);
  10322. drm_crtc_vblank_put(crtc);
  10323. free_work:
  10324. kfree(work);
  10325. if (ret == -EIO) {
  10326. struct drm_atomic_state *state;
  10327. struct drm_plane_state *plane_state;
  10328. out_hang:
  10329. state = drm_atomic_state_alloc(dev);
  10330. if (!state)
  10331. return -ENOMEM;
  10332. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  10333. retry:
  10334. plane_state = drm_atomic_get_plane_state(state, primary);
  10335. ret = PTR_ERR_OR_ZERO(plane_state);
  10336. if (!ret) {
  10337. drm_atomic_set_fb_for_plane(plane_state, fb);
  10338. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  10339. if (!ret)
  10340. ret = drm_atomic_commit(state);
  10341. }
  10342. if (ret == -EDEADLK) {
  10343. drm_modeset_backoff(state->acquire_ctx);
  10344. drm_atomic_state_clear(state);
  10345. goto retry;
  10346. }
  10347. drm_atomic_state_put(state);
  10348. if (ret == 0 && event) {
  10349. spin_lock_irq(&dev->event_lock);
  10350. drm_crtc_send_vblank_event(crtc, event);
  10351. spin_unlock_irq(&dev->event_lock);
  10352. }
  10353. }
  10354. return ret;
  10355. }
  10356. /**
  10357. * intel_wm_need_update - Check whether watermarks need updating
  10358. * @plane: drm plane
  10359. * @state: new plane state
  10360. *
  10361. * Check current plane state versus the new one to determine whether
  10362. * watermarks need to be recalculated.
  10363. *
10364. * Return: true if the watermarks need to be recalculated, false otherwise.
  10365. */
  10366. static bool intel_wm_need_update(struct drm_plane *plane,
  10367. struct drm_plane_state *state)
  10368. {
  10369. struct intel_plane_state *new = to_intel_plane_state(state);
  10370. struct intel_plane_state *cur = to_intel_plane_state(plane->state);
  10371. /* Update watermarks on tiling or size changes. */
  10372. if (new->base.visible != cur->base.visible)
  10373. return true;
  10374. if (!cur->base.fb || !new->base.fb)
  10375. return false;
  10376. if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
  10377. cur->base.rotation != new->base.rotation ||
  10378. drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
  10379. drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
  10380. drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
  10381. drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
  10382. return true;
  10383. return false;
  10384. }
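/* True when the plane's source and destination sizes differ, i.e. scaling is required. */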
  10385. static bool needs_scaling(struct intel_plane_state *state)
  10386. {
  10387. int src_w = drm_rect_width(&state->base.src) >> 16;
  10388. int src_h = drm_rect_height(&state->base.src) >> 16;
  10389. int dst_w = drm_rect_width(&state->base.dst);
  10390. int dst_h = drm_rect_height(&state->base.dst);
  10391. return (src_w != dst_w || src_h != dst_h);
  10392. }
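/* Derive per-plane update flags (watermarks, cxsr, fb_bits, scaler state) from the old and new plane state. */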
  10393. int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
  10394. struct drm_plane_state *plane_state)
  10395. {
  10396. struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
  10397. struct drm_crtc *crtc = crtc_state->crtc;
  10398. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10399. struct drm_plane *plane = plane_state->plane;
  10400. struct drm_device *dev = crtc->dev;
  10401. struct drm_i915_private *dev_priv = to_i915(dev);
  10402. struct intel_plane_state *old_plane_state =
  10403. to_intel_plane_state(plane->state);
  10404. bool mode_changed = needs_modeset(crtc_state);
  10405. bool was_crtc_enabled = crtc->state->active;
  10406. bool is_crtc_enabled = crtc_state->active;
  10407. bool turn_off, turn_on, visible, was_visible;
  10408. struct drm_framebuffer *fb = plane_state->fb;
  10409. int ret;
  10410. if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
  10411. ret = skl_update_scaler_plane(
  10412. to_intel_crtc_state(crtc_state),
  10413. to_intel_plane_state(plane_state));
  10414. if (ret)
  10415. return ret;
  10416. }
  10417. was_visible = old_plane_state->base.visible;
  10418. visible = to_intel_plane_state(plane_state)->base.visible;
  10419. if (!was_crtc_enabled && WARN_ON(was_visible))
  10420. was_visible = false;
  10421. /*
  10422. * Visibility is calculated as if the crtc was on, but
  10423. * after scaler setup everything depends on it being off
  10424. * when the crtc isn't active.
  10425. *
  10426. * FIXME this is wrong for watermarks. Watermarks should also
  10427. * be computed as if the pipe would be active. Perhaps move
  10428. * per-plane wm computation to the .check_plane() hook, and
  10429. * only combine the results from all planes in the current place?
  10430. */
  10431. if (!is_crtc_enabled)
  10432. to_intel_plane_state(plane_state)->base.visible = visible = false;
  10433. if (!was_visible && !visible)
  10434. return 0;
  10435. if (fb != old_plane_state->base.fb)
  10436. pipe_config->fb_changed = true;
  10437. turn_off = was_visible && (!visible || mode_changed);
  10438. turn_on = visible && (!was_visible || mode_changed);
  10439. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
  10440. intel_crtc->base.base.id,
  10441. intel_crtc->base.name,
  10442. plane->base.id, plane->name,
  10443. fb ? fb->base.id : -1);
  10444. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
  10445. plane->base.id, plane->name,
  10446. was_visible, visible,
  10447. turn_off, turn_on, mode_changed);
  10448. if (turn_on) {
  10449. pipe_config->update_wm_pre = true;
  10450. /* must disable cxsr around plane enable/disable */
  10451. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  10452. pipe_config->disable_cxsr = true;
  10453. } else if (turn_off) {
  10454. pipe_config->update_wm_post = true;
  10455. /* must disable cxsr around plane enable/disable */
  10456. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  10457. pipe_config->disable_cxsr = true;
  10458. } else if (intel_wm_need_update(plane, plane_state)) {
  10459. /* FIXME bollocks */
  10460. pipe_config->update_wm_pre = true;
  10461. pipe_config->update_wm_post = true;
  10462. }
  10463. /* Pre-gen9 platforms need two-step watermark updates */
  10464. if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
  10465. INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
  10466. to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
  10467. if (visible || was_visible)
  10468. pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
  10469. /*
  10470. * WaCxSRDisabledForSpriteScaling:ivb
  10471. *
  10472. * cstate->update_wm was already set above, so this flag will
  10473. * take effect when we commit and program watermarks.
  10474. */
  10475. if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
  10476. needs_scaling(to_intel_plane_state(plane_state)) &&
  10477. !needs_scaling(old_plane_state))
  10478. pipe_config->disable_lp_wm = true;
  10479. return 0;
  10480. }
  10481. static bool encoders_cloneable(const struct intel_encoder *a,
  10482. const struct intel_encoder *b)
  10483. {
  10484. /* masks could be asymmetric, so check both ways */
  10485. return a == b || (a->cloneable & (1 << b->type) &&
  10486. b->cloneable & (1 << a->type));
  10487. }
  10488. static bool check_single_encoder_cloning(struct drm_atomic_state *state,
  10489. struct intel_crtc *crtc,
  10490. struct intel_encoder *encoder)
  10491. {
  10492. struct intel_encoder *source_encoder;
  10493. struct drm_connector *connector;
  10494. struct drm_connector_state *connector_state;
  10495. int i;
  10496. for_each_connector_in_state(state, connector, connector_state, i) {
  10497. if (connector_state->crtc != &crtc->base)
  10498. continue;
  10499. source_encoder =
  10500. to_intel_encoder(connector_state->best_encoder);
  10501. if (!encoders_cloneable(encoder, source_encoder))
  10502. return false;
  10503. }
  10504. return true;
  10505. }
  10506. static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  10507. struct drm_crtc_state *crtc_state)
  10508. {
  10509. struct drm_device *dev = crtc->dev;
  10510. struct drm_i915_private *dev_priv = to_i915(dev);
  10511. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10512. struct intel_crtc_state *pipe_config =
  10513. to_intel_crtc_state(crtc_state);
  10514. struct drm_atomic_state *state = crtc_state->state;
  10515. int ret;
  10516. bool mode_changed = needs_modeset(crtc_state);
  10517. if (mode_changed && !crtc_state->active)
  10518. pipe_config->update_wm_post = true;
  10519. if (mode_changed && crtc_state->enable &&
  10520. dev_priv->display.crtc_compute_clock &&
  10521. !WARN_ON(pipe_config->shared_dpll)) {
  10522. ret = dev_priv->display.crtc_compute_clock(intel_crtc,
  10523. pipe_config);
  10524. if (ret)
  10525. return ret;
  10526. }
  10527. if (crtc_state->color_mgmt_changed) {
  10528. ret = intel_color_check(crtc, crtc_state);
  10529. if (ret)
  10530. return ret;
  10531. /*
  10532. * Changing color management on Intel hardware is
  10533. * handled as part of planes update.
  10534. */
  10535. crtc_state->planes_changed = true;
  10536. }
  10537. ret = 0;
  10538. if (dev_priv->display.compute_pipe_wm) {
  10539. ret = dev_priv->display.compute_pipe_wm(pipe_config);
  10540. if (ret) {
  10541. DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
  10542. return ret;
  10543. }
  10544. }
  10545. if (dev_priv->display.compute_intermediate_wm &&
  10546. !to_intel_atomic_state(state)->skip_intermediate_wm) {
  10547. if (WARN_ON(!dev_priv->display.compute_pipe_wm))
  10548. return 0;
  10549. /*
  10550. * Calculate 'intermediate' watermarks that satisfy both the
  10551. * old state and the new state. We can program these
  10552. * immediately.
  10553. */
  10554. ret = dev_priv->display.compute_intermediate_wm(dev,
  10555. intel_crtc,
  10556. pipe_config);
  10557. if (ret) {
  10558. DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
  10559. return ret;
  10560. }
  10561. } else if (dev_priv->display.compute_intermediate_wm) {
  10562. if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
  10563. pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
  10564. }
  10565. if (INTEL_GEN(dev_priv) >= 9) {
  10566. if (mode_changed)
  10567. ret = skl_update_scaler_crtc(pipe_config);
  10568. if (!ret)
  10569. ret = intel_atomic_setup_scalers(dev, intel_crtc,
  10570. pipe_config);
  10571. }
  10572. return ret;
  10573. }
  10574. static const struct drm_crtc_helper_funcs intel_helper_funcs = {
  10575. .mode_set_base_atomic = intel_pipe_set_base_atomic,
  10576. .atomic_begin = intel_begin_crtc_commit,
  10577. .atomic_flush = intel_finish_crtc_commit,
  10578. .atomic_check = intel_crtc_atomic_check,
  10579. };
  10580. static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
  10581. {
  10582. struct intel_connector *connector;
  10583. for_each_intel_connector(dev, connector) {
  10584. if (connector->base.state->crtc)
  10585. drm_connector_unreference(&connector->base);
  10586. if (connector->base.encoder) {
  10587. connector->base.state->best_encoder =
  10588. connector->base.encoder;
  10589. connector->base.state->crtc =
  10590. connector->base.encoder->crtc;
  10591. drm_connector_reference(&connector->base);
  10592. } else {
  10593. connector->base.state->best_encoder = NULL;
  10594. connector->base.state->crtc = NULL;
  10595. }
  10596. }
  10597. }
  10598. static void
  10599. connected_sink_compute_bpp(struct intel_connector *connector,
  10600. struct intel_crtc_state *pipe_config)
  10601. {
  10602. const struct drm_display_info *info = &connector->base.display_info;
  10603. int bpp = pipe_config->pipe_bpp;
10604. DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
  10605. connector->base.base.id,
  10606. connector->base.name);
  10607. /* Don't use an invalid EDID bpc value */
  10608. if (info->bpc != 0 && info->bpc * 3 < bpp) {
  10609. DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
  10610. bpp, info->bpc * 3);
  10611. pipe_config->pipe_bpp = info->bpc * 3;
  10612. }
  10613. /* Clamp bpp to 8 on screens without EDID 1.4 */
  10614. if (info->bpc == 0 && bpp > 24) {
  10615. DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
  10616. bpp);
  10617. pipe_config->pipe_bpp = 24;
  10618. }
  10619. }
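/* Start from the platform's maximum pipe bpp and clamp it against each connected sink. */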
  10620. static int
  10621. compute_baseline_pipe_bpp(struct intel_crtc *crtc,
  10622. struct intel_crtc_state *pipe_config)
  10623. {
  10624. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  10625. struct drm_atomic_state *state;
  10626. struct drm_connector *connector;
  10627. struct drm_connector_state *connector_state;
  10628. int bpp, i;
  10629. if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
  10630. IS_CHERRYVIEW(dev_priv)))
  10631. bpp = 10*3;
  10632. else if (INTEL_GEN(dev_priv) >= 5)
  10633. bpp = 12*3;
  10634. else
  10635. bpp = 8*3;
  10636. pipe_config->pipe_bpp = bpp;
  10637. state = pipe_config->base.state;
  10638. /* Clamp display bpp to EDID value */
  10639. for_each_connector_in_state(state, connector, connector_state, i) {
  10640. if (connector_state->crtc != &crtc->base)
  10641. continue;
  10642. connected_sink_compute_bpp(to_intel_connector(connector),
  10643. pipe_config);
  10644. }
  10645. return bpp;
  10646. }
  10647. static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
  10648. {
  10649. DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
  10650. "type: 0x%x flags: 0x%x\n",
  10651. mode->crtc_clock,
  10652. mode->crtc_hdisplay, mode->crtc_hsync_start,
  10653. mode->crtc_hsync_end, mode->crtc_htotal,
  10654. mode->crtc_vdisplay, mode->crtc_vsync_start,
  10655. mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
  10656. }
  10657. static inline void
  10658. intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
  10659. unsigned int lane_count, struct intel_link_m_n *m_n)
  10660. {
  10661. DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
  10662. id, lane_count,
  10663. m_n->gmch_m, m_n->gmch_n,
  10664. m_n->link_m, m_n->link_n, m_n->tu);
  10665. }
  10666. static void intel_dump_pipe_config(struct intel_crtc *crtc,
  10667. struct intel_crtc_state *pipe_config,
  10668. const char *context)
  10669. {
  10670. struct drm_device *dev = crtc->base.dev;
  10671. struct drm_i915_private *dev_priv = to_i915(dev);
  10672. struct drm_plane *plane;
  10673. struct intel_plane *intel_plane;
  10674. struct intel_plane_state *state;
  10675. struct drm_framebuffer *fb;
  10676. DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
  10677. crtc->base.base.id, crtc->base.name, context);
  10678. DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
  10679. transcoder_name(pipe_config->cpu_transcoder),
  10680. pipe_config->pipe_bpp, pipe_config->dither);
  10681. if (pipe_config->has_pch_encoder)
  10682. intel_dump_m_n_config(pipe_config, "fdi",
  10683. pipe_config->fdi_lanes,
  10684. &pipe_config->fdi_m_n);
  10685. if (intel_crtc_has_dp_encoder(pipe_config)) {
  10686. intel_dump_m_n_config(pipe_config, "dp m_n",
  10687. pipe_config->lane_count, &pipe_config->dp_m_n);
  10688. if (pipe_config->has_drrs)
  10689. intel_dump_m_n_config(pipe_config, "dp m2_n2",
  10690. pipe_config->lane_count,
  10691. &pipe_config->dp_m2_n2);
  10692. }
  10693. DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
  10694. pipe_config->has_audio, pipe_config->has_infoframe);
  10695. DRM_DEBUG_KMS("requested mode:\n");
  10696. drm_mode_debug_printmodeline(&pipe_config->base.mode);
  10697. DRM_DEBUG_KMS("adjusted mode:\n");
  10698. drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
  10699. intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
  10700. DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
  10701. pipe_config->port_clock,
  10702. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  10703. if (INTEL_GEN(dev_priv) >= 9)
  10704. DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
  10705. crtc->num_scalers,
  10706. pipe_config->scaler_state.scaler_users,
  10707. pipe_config->scaler_state.scaler_id);
  10708. if (HAS_GMCH_DISPLAY(dev_priv))
  10709. DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
  10710. pipe_config->gmch_pfit.control,
  10711. pipe_config->gmch_pfit.pgm_ratios,
  10712. pipe_config->gmch_pfit.lvds_border_bits);
  10713. else
  10714. DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
  10715. pipe_config->pch_pfit.pos,
  10716. pipe_config->pch_pfit.size,
  10717. enableddisabled(pipe_config->pch_pfit.enabled));
  10718. DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
  10719. pipe_config->ips_enabled, pipe_config->double_wide);
  10720. if (IS_BROXTON(dev_priv)) {
  10721. DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
  10722. "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
  10723. "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
  10724. pipe_config->dpll_hw_state.ebb0,
  10725. pipe_config->dpll_hw_state.ebb4,
  10726. pipe_config->dpll_hw_state.pll0,
  10727. pipe_config->dpll_hw_state.pll1,
  10728. pipe_config->dpll_hw_state.pll2,
  10729. pipe_config->dpll_hw_state.pll3,
  10730. pipe_config->dpll_hw_state.pll6,
  10731. pipe_config->dpll_hw_state.pll8,
  10732. pipe_config->dpll_hw_state.pll9,
  10733. pipe_config->dpll_hw_state.pll10,
  10734. pipe_config->dpll_hw_state.pcsdw12);
  10735. } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
  10736. DRM_DEBUG_KMS("dpll_hw_state: "
  10737. "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
  10738. pipe_config->dpll_hw_state.ctrl1,
  10739. pipe_config->dpll_hw_state.cfgcr1,
  10740. pipe_config->dpll_hw_state.cfgcr2);
  10741. } else if (HAS_DDI(dev_priv)) {
  10742. DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
  10743. pipe_config->dpll_hw_state.wrpll,
  10744. pipe_config->dpll_hw_state.spll);
  10745. } else {
  10746. DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
  10747. "fp0: 0x%x, fp1: 0x%x\n",
  10748. pipe_config->dpll_hw_state.dpll,
  10749. pipe_config->dpll_hw_state.dpll_md,
  10750. pipe_config->dpll_hw_state.fp0,
  10751. pipe_config->dpll_hw_state.fp1);
  10752. }
  10753. DRM_DEBUG_KMS("planes on this crtc\n");
  10754. list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
  10755. struct drm_format_name_buf format_name;
  10756. intel_plane = to_intel_plane(plane);
  10757. if (intel_plane->pipe != crtc->pipe)
  10758. continue;
  10759. state = to_intel_plane_state(plane->state);
  10760. fb = state->base.fb;
  10761. if (!fb) {
  10762. DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
  10763. plane->base.id, plane->name, state->scaler_id);
  10764. continue;
  10765. }
  10766. DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
  10767. plane->base.id, plane->name,
  10768. fb->base.id, fb->width, fb->height,
  10769. drm_get_format_name(fb->pixel_format, &format_name));
  10770. if (INTEL_GEN(dev_priv) >= 9)
  10771. DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
  10772. state->scaler_id,
  10773. state->base.src.x1 >> 16,
  10774. state->base.src.y1 >> 16,
  10775. drm_rect_width(&state->base.src) >> 16,
  10776. drm_rect_height(&state->base.src) >> 16,
  10777. state->base.dst.x1, state->base.dst.y1,
  10778. drm_rect_width(&state->base.dst),
  10779. drm_rect_height(&state->base.dst));
  10780. }
  10781. }
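/* Reject configurations that use the same digital port twice or mix MST with SST/HDMI on one port. */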
  10782. static bool check_digital_port_conflicts(struct drm_atomic_state *state)
  10783. {
  10784. struct drm_device *dev = state->dev;
  10785. struct drm_connector *connector;
  10786. unsigned int used_ports = 0;
  10787. unsigned int used_mst_ports = 0;
  10788. /*
  10789. * Walk the connector list instead of the encoder
  10790. * list to detect the problem on ddi platforms
  10791. * where there's just one encoder per digital port.
  10792. */
  10793. drm_for_each_connector(connector, dev) {
  10794. struct drm_connector_state *connector_state;
  10795. struct intel_encoder *encoder;
  10796. connector_state = drm_atomic_get_existing_connector_state(state, connector);
  10797. if (!connector_state)
  10798. connector_state = connector->state;
  10799. if (!connector_state->best_encoder)
  10800. continue;
  10801. encoder = to_intel_encoder(connector_state->best_encoder);
  10802. WARN_ON(!connector_state->crtc);
  10803. switch (encoder->type) {
  10804. unsigned int port_mask;
  10805. case INTEL_OUTPUT_UNKNOWN:
  10806. if (WARN_ON(!HAS_DDI(to_i915(dev))))
  10807. break;
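/* else: fall through - on DDI the unknown output still owns a digital port */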
  10808. case INTEL_OUTPUT_DP:
  10809. case INTEL_OUTPUT_HDMI:
  10810. case INTEL_OUTPUT_EDP:
  10811. port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
  10812. /* the same port mustn't appear more than once */
  10813. if (used_ports & port_mask)
  10814. return false;
  10815. used_ports |= port_mask;
  10816. break;
  10817. case INTEL_OUTPUT_DP_MST:
  10818. used_mst_ports |=
  10819. 1 << enc_to_mst(&encoder->base)->primary->port;
  10820. break;
  10821. default:
  10822. break;
  10823. }
  10824. }
  10825. /* can't mix MST and SST/HDMI on the same port */
  10826. if (used_ports & used_mst_ports)
  10827. return false;
  10828. return true;
  10829. }
  10830. static void
  10831. clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  10832. {
  10833. struct drm_crtc_state tmp_state;
  10834. struct intel_crtc_scaler_state scaler_state;
  10835. struct intel_dpll_hw_state dpll_hw_state;
  10836. struct intel_shared_dpll *shared_dpll;
  10837. bool force_thru;
  10838. /* FIXME: before the switch to atomic started, a new pipe_config was
  10839. * kzalloc'd. Code that depends on any field being zero should be
  10840. * fixed, so that the crtc_state can be safely duplicated. For now,
10841. * only fields that are known not to cause problems are preserved. */
  10842. tmp_state = crtc_state->base;
  10843. scaler_state = crtc_state->scaler_state;
  10844. shared_dpll = crtc_state->shared_dpll;
  10845. dpll_hw_state = crtc_state->dpll_hw_state;
  10846. force_thru = crtc_state->pch_pfit.force_thru;
  10847. memset(crtc_state, 0, sizeof *crtc_state);
  10848. crtc_state->base = tmp_state;
  10849. crtc_state->scaler_state = scaler_state;
  10850. crtc_state->shared_dpll = shared_dpll;
  10851. crtc_state->dpll_hw_state = dpll_hw_state;
  10852. crtc_state->pch_pfit.force_thru = force_thru;
  10853. }
  10854. static int
  10855. intel_modeset_pipe_config(struct drm_crtc *crtc,
  10856. struct intel_crtc_state *pipe_config)
  10857. {
  10858. struct drm_atomic_state *state = pipe_config->base.state;
  10859. struct intel_encoder *encoder;
  10860. struct drm_connector *connector;
  10861. struct drm_connector_state *connector_state;
  10862. int base_bpp, ret = -EINVAL;
  10863. int i;
  10864. bool retry = true;
  10865. clear_intel_crtc_state(pipe_config);
  10866. pipe_config->cpu_transcoder =
  10867. (enum transcoder) to_intel_crtc(crtc)->pipe;
  10868. /*
  10869. * Sanitize sync polarity flags based on requested ones. If neither
10870. * positive nor negative polarity is requested, treat this as meaning
  10871. * negative polarity.
  10872. */
  10873. if (!(pipe_config->base.adjusted_mode.flags &
  10874. (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
  10875. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
  10876. if (!(pipe_config->base.adjusted_mode.flags &
  10877. (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
  10878. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
  10879. base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
  10880. pipe_config);
  10881. if (base_bpp < 0)
  10882. goto fail;
  10883. /*
  10884. * Determine the real pipe dimensions. Note that stereo modes can
  10885. * increase the actual pipe size due to the frame doubling and
10886. * insertion of additional space for blanks between the frames. This
  10887. * is stored in the crtc timings. We use the requested mode to do this
  10888. * computation to clearly distinguish it from the adjusted mode, which
  10889. * can be changed by the connectors in the below retry loop.
  10890. */
  10891. drm_crtc_get_hv_timing(&pipe_config->base.mode,
  10892. &pipe_config->pipe_src_w,
  10893. &pipe_config->pipe_src_h);
  10894. for_each_connector_in_state(state, connector, connector_state, i) {
  10895. if (connector_state->crtc != crtc)
  10896. continue;
  10897. encoder = to_intel_encoder(connector_state->best_encoder);
  10898. if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
  10899. DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
  10900. goto fail;
  10901. }
  10902. /*
  10903. * Determine output_types before calling the .compute_config()
  10904. * hooks so that the hooks can use this information safely.
  10905. */
  10906. pipe_config->output_types |= 1 << encoder->type;
  10907. }
  10908. encoder_retry:
  10909. /* Ensure the port clock defaults are reset when retrying. */
  10910. pipe_config->port_clock = 0;
  10911. pipe_config->pixel_multiplier = 1;
  10912. /* Fill in default crtc timings, allow encoders to overwrite them. */
  10913. drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
  10914. CRTC_STEREO_DOUBLE);
  10915. /* Pass our mode to the connectors and the CRTC to give them a chance to
  10916. * adjust it according to limitations or connector properties, and also
  10917. * a chance to reject the mode entirely.
  10918. */
  10919. for_each_connector_in_state(state, connector, connector_state, i) {
  10920. if (connector_state->crtc != crtc)
  10921. continue;
  10922. encoder = to_intel_encoder(connector_state->best_encoder);
  10923. if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
  10924. DRM_DEBUG_KMS("Encoder config failure\n");
  10925. goto fail;
  10926. }
  10927. }
  10928. /* Set default port clock if not overwritten by the encoder. Needs to be
  10929. * done afterwards in case the encoder adjusts the mode. */
  10930. if (!pipe_config->port_clock)
  10931. pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
  10932. * pipe_config->pixel_multiplier;
  10933. ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
  10934. if (ret < 0) {
  10935. DRM_DEBUG_KMS("CRTC fixup failed\n");
  10936. goto fail;
  10937. }
  10938. if (ret == RETRY) {
  10939. if (WARN(!retry, "loop in pipe configuration computation\n")) {
  10940. ret = -EINVAL;
  10941. goto fail;
  10942. }
  10943. DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
  10944. retry = false;
  10945. goto encoder_retry;
  10946. }
10947. /* Dithering seems not to pass bits through correctly when it should, so
  10948. * only enable it on 6bpc panels. */
  10949. pipe_config->dither = pipe_config->pipe_bpp == 6*3;
  10950. DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
  10951. base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  10952. fail:
  10953. return ret;
  10954. }
  10955. static void
  10956. intel_modeset_update_crtc_state(struct drm_atomic_state *state)
  10957. {
  10958. struct drm_crtc *crtc;
  10959. struct drm_crtc_state *crtc_state;
  10960. int i;
  10961. /* Double check state. */
  10962. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  10963. to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
  10964. /* Update hwmode for vblank functions */
  10965. if (crtc->state->active)
  10966. crtc->hwmode = crtc->state->adjusted_mode;
  10967. else
  10968. crtc->hwmode.crtc_clock = 0;
  10969. /*
  10970. * Update legacy state to satisfy fbc code. This can
  10971. * be removed when fbc uses the atomic state.
  10972. */
  10973. if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
  10974. struct drm_plane_state *plane_state = crtc->primary->state;
  10975. crtc->primary->fb = plane_state->fb;
  10976. crtc->x = plane_state->src_x >> 16;
  10977. crtc->y = plane_state->src_y >> 16;
  10978. }
  10979. }
  10980. }
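/*
 * Treat two clocks as equal when they differ by less than roughly 5% of
 * their sum, i.e. when they are within about 10% of each other. For
 * example, 100000 vs 105000 gives (5000 + 205000) * 100 / 205000 = 102,
 * which is below the 105 cutoff and therefore matches, while
 * 100000 vs 112000 evaluates to 105 and is rejected.
 */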
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int diff;

        if (clock1 == clock2)
                return true;

        if (!clock1 || !clock2)
                return false;

        diff = abs(clock1 - clock2);

        if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
                return true;

        return false;
}
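/*
 * Compare two M/N link values. In the non-exact case the pair with the
 * smaller N is scaled up by powers of two until the N values line up;
 * only if they then match exactly are the M values compared with the
 * fuzzy clock check above, so the ratios are accepted when they agree
 * to within the same tolerance.
 */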
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
                  unsigned int m2, unsigned int n2,
                  bool exact)
{
        if (m == m2 && n == n2)
                return true;

        if (exact || !m || !n || !m2 || !n2)
                return false;

        BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

        if (n > n2) {
                while (n > n2) {
                        m2 <<= 1;
                        n2 <<= 1;
                }
        } else if (n < n2) {
                while (n < n2) {
                        m <<= 1;
                        n <<= 1;
                }
        }

        if (n != n2)
                return false;

        return intel_fuzzy_clock_check(m, m2);
}
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
                       struct intel_link_m_n *m2_n2,
                       bool adjust)
{
        if (m_n->tu == m2_n2->tu &&
            intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
                              m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
            intel_compare_m_n(m_n->link_m, m_n->link_n,
                              m2_n2->link_m, m2_n2->link_n, !adjust)) {
                if (adjust)
                        *m2_n2 = *m_n;

                return true;
        }

        return false;
}
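/*
 * Compare two pipe configs, e.g. the software state against the state
 * read back from the hardware in verify_crtc_state(). With
 * adjust == true mismatches are only reported at debug level (and the
 * M/N values may be fixed up to match), which is how the fastboot check
 * in intel_atomic_check() uses it; with adjust == false every mismatch
 * is treated as an error.
 */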
  11034. static bool
  11035. intel_pipe_config_compare(struct drm_i915_private *dev_priv,
  11036. struct intel_crtc_state *current_config,
  11037. struct intel_crtc_state *pipe_config,
  11038. bool adjust)
  11039. {
  11040. bool ret = true;
  11041. #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
  11042. do { \
  11043. if (!adjust) \
  11044. DRM_ERROR(fmt, ##__VA_ARGS__); \
  11045. else \
  11046. DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
  11047. } while (0)
  11048. #define PIPE_CONF_CHECK_X(name) \
  11049. if (current_config->name != pipe_config->name) { \
  11050. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11051. "(expected 0x%08x, found 0x%08x)\n", \
  11052. current_config->name, \
  11053. pipe_config->name); \
  11054. ret = false; \
  11055. }
  11056. #define PIPE_CONF_CHECK_I(name) \
  11057. if (current_config->name != pipe_config->name) { \
  11058. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11059. "(expected %i, found %i)\n", \
  11060. current_config->name, \
  11061. pipe_config->name); \
  11062. ret = false; \
  11063. }
  11064. #define PIPE_CONF_CHECK_P(name) \
  11065. if (current_config->name != pipe_config->name) { \
  11066. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11067. "(expected %p, found %p)\n", \
  11068. current_config->name, \
  11069. pipe_config->name); \
  11070. ret = false; \
  11071. }
  11072. #define PIPE_CONF_CHECK_M_N(name) \
  11073. if (!intel_compare_link_m_n(&current_config->name, \
  11074. &pipe_config->name,\
  11075. adjust)) { \
  11076. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11077. "(expected tu %i gmch %i/%i link %i/%i, " \
  11078. "found tu %i, gmch %i/%i link %i/%i)\n", \
  11079. current_config->name.tu, \
  11080. current_config->name.gmch_m, \
  11081. current_config->name.gmch_n, \
  11082. current_config->name.link_m, \
  11083. current_config->name.link_n, \
  11084. pipe_config->name.tu, \
  11085. pipe_config->name.gmch_m, \
  11086. pipe_config->name.gmch_n, \
  11087. pipe_config->name.link_m, \
  11088. pipe_config->name.link_n); \
  11089. ret = false; \
  11090. }
  11091. /* This is required for BDW+ where there is only one set of registers for
  11092. * switching between high and low RR.
  11093. * This macro can be used whenever a comparison has to be made between one
  11094. * hw state and multiple sw state variables.
  11095. */
  11096. #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
  11097. if (!intel_compare_link_m_n(&current_config->name, \
  11098. &pipe_config->name, adjust) && \
  11099. !intel_compare_link_m_n(&current_config->alt_name, \
  11100. &pipe_config->name, adjust)) { \
  11101. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11102. "(expected tu %i gmch %i/%i link %i/%i, " \
  11103. "or tu %i gmch %i/%i link %i/%i, " \
  11104. "found tu %i, gmch %i/%i link %i/%i)\n", \
  11105. current_config->name.tu, \
  11106. current_config->name.gmch_m, \
  11107. current_config->name.gmch_n, \
  11108. current_config->name.link_m, \
  11109. current_config->name.link_n, \
  11110. current_config->alt_name.tu, \
  11111. current_config->alt_name.gmch_m, \
  11112. current_config->alt_name.gmch_n, \
  11113. current_config->alt_name.link_m, \
  11114. current_config->alt_name.link_n, \
  11115. pipe_config->name.tu, \
  11116. pipe_config->name.gmch_m, \
  11117. pipe_config->name.gmch_n, \
  11118. pipe_config->name.link_m, \
  11119. pipe_config->name.link_n); \
  11120. ret = false; \
  11121. }
  11122. #define PIPE_CONF_CHECK_FLAGS(name, mask) \
  11123. if ((current_config->name ^ pipe_config->name) & (mask)) { \
  11124. INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
  11125. "(expected %i, found %i)\n", \
  11126. current_config->name & (mask), \
  11127. pipe_config->name & (mask)); \
  11128. ret = false; \
  11129. }
  11130. #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
  11131. if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
  11132. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  11133. "(expected %i, found %i)\n", \
  11134. current_config->name, \
  11135. pipe_config->name); \
  11136. ret = false; \
  11137. }
  11138. #define PIPE_CONF_QUIRK(quirk) \
  11139. ((current_config->quirks | pipe_config->quirks) & (quirk))
  11140. PIPE_CONF_CHECK_I(cpu_transcoder);
  11141. PIPE_CONF_CHECK_I(has_pch_encoder);
  11142. PIPE_CONF_CHECK_I(fdi_lanes);
  11143. PIPE_CONF_CHECK_M_N(fdi_m_n);
  11144. PIPE_CONF_CHECK_I(lane_count);
  11145. PIPE_CONF_CHECK_X(lane_lat_optim_mask);
  11146. if (INTEL_GEN(dev_priv) < 8) {
  11147. PIPE_CONF_CHECK_M_N(dp_m_n);
  11148. if (current_config->has_drrs)
  11149. PIPE_CONF_CHECK_M_N(dp_m2_n2);
  11150. } else
  11151. PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
  11152. PIPE_CONF_CHECK_X(output_types);
  11153. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
  11154. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
  11155. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
  11156. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
  11157. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
  11158. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
  11159. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
  11160. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
  11161. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
  11162. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
  11163. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
  11164. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
  11165. PIPE_CONF_CHECK_I(pixel_multiplier);
  11166. PIPE_CONF_CHECK_I(has_hdmi_sink);
  11167. if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
  11168. IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  11169. PIPE_CONF_CHECK_I(limited_color_range);
  11170. PIPE_CONF_CHECK_I(has_infoframe);
  11171. PIPE_CONF_CHECK_I(has_audio);
  11172. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11173. DRM_MODE_FLAG_INTERLACE);
  11174. if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
  11175. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11176. DRM_MODE_FLAG_PHSYNC);
  11177. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11178. DRM_MODE_FLAG_NHSYNC);
  11179. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11180. DRM_MODE_FLAG_PVSYNC);
  11181. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  11182. DRM_MODE_FLAG_NVSYNC);
  11183. }
  11184. PIPE_CONF_CHECK_X(gmch_pfit.control);
  11185. /* pfit ratios are autocomputed by the hw on gen4+ */
  11186. if (INTEL_GEN(dev_priv) < 4)
  11187. PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
  11188. PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
  11189. if (!adjust) {
  11190. PIPE_CONF_CHECK_I(pipe_src_w);
  11191. PIPE_CONF_CHECK_I(pipe_src_h);
  11192. PIPE_CONF_CHECK_I(pch_pfit.enabled);
  11193. if (current_config->pch_pfit.enabled) {
  11194. PIPE_CONF_CHECK_X(pch_pfit.pos);
  11195. PIPE_CONF_CHECK_X(pch_pfit.size);
  11196. }
  11197. PIPE_CONF_CHECK_I(scaler_state.scaler_id);
  11198. }
  11199. /* BDW+ don't expose a synchronous way to read the state */
  11200. if (IS_HASWELL(dev_priv))
  11201. PIPE_CONF_CHECK_I(ips_enabled);
  11202. PIPE_CONF_CHECK_I(double_wide);
  11203. PIPE_CONF_CHECK_P(shared_dpll);
  11204. PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
  11205. PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
  11206. PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
  11207. PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
  11208. PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
  11209. PIPE_CONF_CHECK_X(dpll_hw_state.spll);
  11210. PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
  11211. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
  11212. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
  11213. PIPE_CONF_CHECK_X(dsi_pll.ctrl);
  11214. PIPE_CONF_CHECK_X(dsi_pll.div);
  11215. if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
  11216. PIPE_CONF_CHECK_I(pipe_bpp);
  11217. PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
  11218. PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
  11219. #undef PIPE_CONF_CHECK_X
  11220. #undef PIPE_CONF_CHECK_I
  11221. #undef PIPE_CONF_CHECK_P
  11222. #undef PIPE_CONF_CHECK_FLAGS
  11223. #undef PIPE_CONF_CHECK_CLOCK_FUZZY
  11224. #undef PIPE_CONF_QUIRK
  11225. #undef INTEL_ERR_OR_DBG_KMS
  11226. return ret;
  11227. }
  11228. static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
  11229. const struct intel_crtc_state *pipe_config)
  11230. {
  11231. if (pipe_config->has_pch_encoder) {
  11232. int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  11233. &pipe_config->fdi_m_n);
  11234. int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
  11235. /*
  11236. * FDI already provided one idea for the dotclock.
  11237. * Yell if the encoder disagrees.
  11238. */
  11239. WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
  11240. "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
  11241. fdi_dotclock, dotclock);
  11242. }
  11243. }
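/*
 * Cross-check the gen9+ watermark and DDB allocation state programmed
 * into the hardware against the software state computed during the
 * atomic check, for every plane on the pipe (and the cursor, when it is
 * active). Mismatches are reported with DRM_ERROR.
 */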
  11244. static void verify_wm_state(struct drm_crtc *crtc,
  11245. struct drm_crtc_state *new_state)
  11246. {
  11247. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  11248. struct skl_ddb_allocation hw_ddb, *sw_ddb;
  11249. struct skl_pipe_wm hw_wm, *sw_wm;
  11250. struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
  11251. struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
  11252. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11253. const enum pipe pipe = intel_crtc->pipe;
  11254. int plane, level, max_level = ilk_wm_max_level(dev_priv);
  11255. if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
  11256. return;
  11257. skl_pipe_wm_get_hw_state(crtc, &hw_wm);
  11258. sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
  11259. skl_ddb_get_hw_state(dev_priv, &hw_ddb);
  11260. sw_ddb = &dev_priv->wm.skl_hw.ddb;
  11261. /* planes */
  11262. for_each_universal_plane(dev_priv, pipe, plane) {
  11263. hw_plane_wm = &hw_wm.planes[plane];
  11264. sw_plane_wm = &sw_wm->planes[plane];
  11265. /* Watermarks */
  11266. for (level = 0; level <= max_level; level++) {
  11267. if (skl_wm_level_equals(&hw_plane_wm->wm[level],
  11268. &sw_plane_wm->wm[level]))
  11269. continue;
  11270. DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11271. pipe_name(pipe), plane + 1, level,
  11272. sw_plane_wm->wm[level].plane_en,
  11273. sw_plane_wm->wm[level].plane_res_b,
  11274. sw_plane_wm->wm[level].plane_res_l,
  11275. hw_plane_wm->wm[level].plane_en,
  11276. hw_plane_wm->wm[level].plane_res_b,
  11277. hw_plane_wm->wm[level].plane_res_l);
  11278. }
  11279. if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
  11280. &sw_plane_wm->trans_wm)) {
  11281. DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11282. pipe_name(pipe), plane + 1,
  11283. sw_plane_wm->trans_wm.plane_en,
  11284. sw_plane_wm->trans_wm.plane_res_b,
  11285. sw_plane_wm->trans_wm.plane_res_l,
  11286. hw_plane_wm->trans_wm.plane_en,
  11287. hw_plane_wm->trans_wm.plane_res_b,
  11288. hw_plane_wm->trans_wm.plane_res_l);
  11289. }
  11290. /* DDB */
  11291. hw_ddb_entry = &hw_ddb.plane[pipe][plane];
  11292. sw_ddb_entry = &sw_ddb->plane[pipe][plane];
  11293. if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
  11294. DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
  11295. pipe_name(pipe), plane + 1,
  11296. sw_ddb_entry->start, sw_ddb_entry->end,
  11297. hw_ddb_entry->start, hw_ddb_entry->end);
  11298. }
  11299. }
/*
 * cursor
 * If the cursor plane isn't active, we may not have updated its DDB
 * allocation. In that case, since the DDB allocation will be updated
 * once the plane becomes visible, we can skip this check.
 */
  11306. if (intel_crtc->cursor_addr) {
  11307. hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
  11308. sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
  11309. /* Watermarks */
  11310. for (level = 0; level <= max_level; level++) {
  11311. if (skl_wm_level_equals(&hw_plane_wm->wm[level],
  11312. &sw_plane_wm->wm[level]))
  11313. continue;
  11314. DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11315. pipe_name(pipe), level,
  11316. sw_plane_wm->wm[level].plane_en,
  11317. sw_plane_wm->wm[level].plane_res_b,
  11318. sw_plane_wm->wm[level].plane_res_l,
  11319. hw_plane_wm->wm[level].plane_en,
  11320. hw_plane_wm->wm[level].plane_res_b,
  11321. hw_plane_wm->wm[level].plane_res_l);
  11322. }
  11323. if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
  11324. &sw_plane_wm->trans_wm)) {
  11325. DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
  11326. pipe_name(pipe),
  11327. sw_plane_wm->trans_wm.plane_en,
  11328. sw_plane_wm->trans_wm.plane_res_b,
  11329. sw_plane_wm->trans_wm.plane_res_l,
  11330. hw_plane_wm->trans_wm.plane_en,
  11331. hw_plane_wm->trans_wm.plane_res_b,
  11332. hw_plane_wm->trans_wm.plane_res_l);
  11333. }
  11334. /* DDB */
  11335. hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
  11336. sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
  11337. if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
  11338. DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
  11339. pipe_name(pipe),
  11340. sw_ddb_entry->start, sw_ddb_entry->end,
  11341. hw_ddb_entry->start, hw_ddb_entry->end);
  11342. }
  11343. }
  11344. }
  11345. static void
  11346. verify_connector_state(struct drm_device *dev,
  11347. struct drm_atomic_state *state,
  11348. struct drm_crtc *crtc)
  11349. {
  11350. struct drm_connector *connector;
  11351. struct drm_connector_state *old_conn_state;
  11352. int i;
  11353. for_each_connector_in_state(state, connector, old_conn_state, i) {
  11354. struct drm_encoder *encoder = connector->encoder;
  11355. struct drm_connector_state *state = connector->state;
  11356. if (state->crtc != crtc)
  11357. continue;
  11358. intel_connector_verify_state(to_intel_connector(connector));
  11359. I915_STATE_WARN(state->best_encoder != encoder,
  11360. "connector's atomic encoder doesn't match legacy encoder\n");
  11361. }
  11362. }
  11363. static void
  11364. verify_encoder_state(struct drm_device *dev)
  11365. {
  11366. struct intel_encoder *encoder;
  11367. struct intel_connector *connector;
  11368. for_each_intel_encoder(dev, encoder) {
  11369. bool enabled = false;
  11370. enum pipe pipe;
  11371. DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
  11372. encoder->base.base.id,
  11373. encoder->base.name);
  11374. for_each_intel_connector(dev, connector) {
  11375. if (connector->base.state->best_encoder != &encoder->base)
  11376. continue;
  11377. enabled = true;
  11378. I915_STATE_WARN(connector->base.state->crtc !=
  11379. encoder->base.crtc,
  11380. "connector's crtc doesn't match encoder crtc\n");
  11381. }
  11382. I915_STATE_WARN(!!encoder->base.crtc != enabled,
  11383. "encoder's enabled state mismatch "
  11384. "(expected %i, found %i)\n",
  11385. !!encoder->base.crtc, enabled);
  11386. if (!encoder->base.crtc) {
  11387. bool active;
  11388. active = encoder->get_hw_state(encoder, &pipe);
  11389. I915_STATE_WARN(active,
  11390. "encoder detached but still enabled on pipe %c.\n",
  11391. pipe_name(pipe));
  11392. }
  11393. }
  11394. }
  11395. static void
  11396. verify_crtc_state(struct drm_crtc *crtc,
  11397. struct drm_crtc_state *old_crtc_state,
  11398. struct drm_crtc_state *new_crtc_state)
  11399. {
  11400. struct drm_device *dev = crtc->dev;
  11401. struct drm_i915_private *dev_priv = to_i915(dev);
  11402. struct intel_encoder *encoder;
  11403. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11404. struct intel_crtc_state *pipe_config, *sw_config;
  11405. struct drm_atomic_state *old_state;
  11406. bool active;
  11407. old_state = old_crtc_state->state;
  11408. __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
  11409. pipe_config = to_intel_crtc_state(old_crtc_state);
  11410. memset(pipe_config, 0, sizeof(*pipe_config));
  11411. pipe_config->base.crtc = crtc;
  11412. pipe_config->base.state = old_state;
  11413. DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
  11414. active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
  11415. /* hw state is inconsistent with the pipe quirk */
  11416. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  11417. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  11418. active = new_crtc_state->active;
  11419. I915_STATE_WARN(new_crtc_state->active != active,
  11420. "crtc active state doesn't match with hw state "
  11421. "(expected %i, found %i)\n", new_crtc_state->active, active);
  11422. I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
  11423. "transitional active state does not match atomic hw state "
  11424. "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
  11425. for_each_encoder_on_crtc(dev, crtc, encoder) {
  11426. enum pipe pipe;
  11427. active = encoder->get_hw_state(encoder, &pipe);
  11428. I915_STATE_WARN(active != new_crtc_state->active,
  11429. "[ENCODER:%i] active %i with crtc active %i\n",
  11430. encoder->base.base.id, active, new_crtc_state->active);
  11431. I915_STATE_WARN(active && intel_crtc->pipe != pipe,
  11432. "Encoder connected to wrong pipe %c\n",
  11433. pipe_name(pipe));
  11434. if (active) {
  11435. pipe_config->output_types |= 1 << encoder->type;
  11436. encoder->get_config(encoder, pipe_config);
  11437. }
  11438. }
  11439. if (!new_crtc_state->active)
  11440. return;
  11441. intel_pipe_config_sanity_check(dev_priv, pipe_config);
  11442. sw_config = to_intel_crtc_state(crtc->state);
  11443. if (!intel_pipe_config_compare(dev_priv, sw_config,
  11444. pipe_config, false)) {
  11445. I915_STATE_WARN(1, "pipe state doesn't match!\n");
  11446. intel_dump_pipe_config(intel_crtc, pipe_config,
  11447. "[hw state]");
  11448. intel_dump_pipe_config(intel_crtc, sw_config,
  11449. "[sw state]");
  11450. }
  11451. }
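/*
 * Check one shared DPLL against the hardware: the on/off state, the
 * active_mask and crtc_mask bookkeeping, and (when a crtc is given)
 * that this crtc is or is not part of those masks as its new state
 * demands. With crtc == NULL only the global bookkeeping is verified,
 * which is how verify_disabled_dpll_state() uses it.
 */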
  11452. static void
  11453. verify_single_dpll_state(struct drm_i915_private *dev_priv,
  11454. struct intel_shared_dpll *pll,
  11455. struct drm_crtc *crtc,
  11456. struct drm_crtc_state *new_state)
  11457. {
  11458. struct intel_dpll_hw_state dpll_hw_state;
  11459. unsigned crtc_mask;
  11460. bool active;
  11461. memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
  11462. DRM_DEBUG_KMS("%s\n", pll->name);
  11463. active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
  11464. if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
  11465. I915_STATE_WARN(!pll->on && pll->active_mask,
  11466. "pll in active use but not on in sw tracking\n");
  11467. I915_STATE_WARN(pll->on && !pll->active_mask,
  11468. "pll is on but not used by any active crtc\n");
  11469. I915_STATE_WARN(pll->on != active,
  11470. "pll on state mismatch (expected %i, found %i)\n",
  11471. pll->on, active);
  11472. }
  11473. if (!crtc) {
  11474. I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
  11475. "more active pll users than references: %x vs %x\n",
  11476. pll->active_mask, pll->config.crtc_mask);
  11477. return;
  11478. }
  11479. crtc_mask = 1 << drm_crtc_index(crtc);
  11480. if (new_state->active)
  11481. I915_STATE_WARN(!(pll->active_mask & crtc_mask),
  11482. "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
  11483. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  11484. else
  11485. I915_STATE_WARN(pll->active_mask & crtc_mask,
  11486. "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
  11487. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  11488. I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
  11489. "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
  11490. crtc_mask, pll->config.crtc_mask);
  11491. I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
  11492. &dpll_hw_state,
  11493. sizeof(dpll_hw_state)),
  11494. "pll hw state mismatch\n");
  11495. }
  11496. static void
  11497. verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
  11498. struct drm_crtc_state *old_crtc_state,
  11499. struct drm_crtc_state *new_crtc_state)
  11500. {
  11501. struct drm_i915_private *dev_priv = to_i915(dev);
  11502. struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
  11503. struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
  11504. if (new_state->shared_dpll)
  11505. verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
  11506. if (old_state->shared_dpll &&
  11507. old_state->shared_dpll != new_state->shared_dpll) {
  11508. unsigned crtc_mask = 1 << drm_crtc_index(crtc);
  11509. struct intel_shared_dpll *pll = old_state->shared_dpll;
  11510. I915_STATE_WARN(pll->active_mask & crtc_mask,
  11511. "pll active mismatch (didn't expect pipe %c in active mask)\n",
  11512. pipe_name(drm_crtc_index(crtc)));
  11513. I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
  11514. "pll enabled crtcs mismatch (found %x in enabled mask)\n",
  11515. pipe_name(drm_crtc_index(crtc)));
  11516. }
  11517. }
  11518. static void
  11519. intel_modeset_verify_crtc(struct drm_crtc *crtc,
  11520. struct drm_atomic_state *state,
  11521. struct drm_crtc_state *old_state,
  11522. struct drm_crtc_state *new_state)
  11523. {
  11524. if (!needs_modeset(new_state) &&
  11525. !to_intel_crtc_state(new_state)->update_pipe)
  11526. return;
  11527. verify_wm_state(crtc, new_state);
  11528. verify_connector_state(crtc->dev, state, crtc);
  11529. verify_crtc_state(crtc, old_state, new_state);
  11530. verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
  11531. }
  11532. static void
  11533. verify_disabled_dpll_state(struct drm_device *dev)
  11534. {
  11535. struct drm_i915_private *dev_priv = to_i915(dev);
  11536. int i;
  11537. for (i = 0; i < dev_priv->num_shared_dpll; i++)
  11538. verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
  11539. }
  11540. static void
  11541. intel_modeset_verify_disabled(struct drm_device *dev,
  11542. struct drm_atomic_state *state)
  11543. {
  11544. verify_encoder_state(dev);
  11545. verify_connector_state(dev, state, NULL);
  11546. verify_disabled_dpll_state(dev);
  11547. }
  11548. static void update_scanline_offset(struct intel_crtc *crtc)
  11549. {
  11550. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  11551. /*
  11552. * The scanline counter increments at the leading edge of hsync.
  11553. *
  11554. * On most platforms it starts counting from vtotal-1 on the
  11555. * first active line. That means the scanline counter value is
  11556. * always one less than what we would expect. Ie. just after
  11557. * start of vblank, which also occurs at start of hsync (on the
  11558. * last active line), the scanline counter will read vblank_start-1.
  11559. *
  11560. * On gen2 the scanline counter starts counting from 1 instead
  11561. * of vtotal-1, so we have to subtract one (or rather add vtotal-1
  11562. * to keep the value positive), instead of adding one.
  11563. *
  11564. * On HSW+ the behaviour of the scanline counter depends on the output
  11565. * type. For DP ports it behaves like most other platforms, but on HDMI
  11566. * there's an extra 1 line difference. So we need to add two instead of
  11567. * one to the value.
  11568. */
  11569. if (IS_GEN2(dev_priv)) {
  11570. const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
  11571. int vtotal;
  11572. vtotal = adjusted_mode->crtc_vtotal;
  11573. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
  11574. vtotal /= 2;
  11575. crtc->scanline_offset = vtotal - 1;
  11576. } else if (HAS_DDI(dev_priv) &&
  11577. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
  11578. crtc->scanline_offset = 2;
  11579. } else
  11580. crtc->scanline_offset = 1;
  11581. }
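/*
 * Drop the shared DPLL references of every crtc that is about to go
 * through a full modeset, so that the DPLL can be reassigned (or turned
 * off) when the new state is computed.
 */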
  11582. static void intel_modeset_clear_plls(struct drm_atomic_state *state)
  11583. {
  11584. struct drm_device *dev = state->dev;
  11585. struct drm_i915_private *dev_priv = to_i915(dev);
  11586. struct intel_shared_dpll_config *shared_dpll = NULL;
  11587. struct drm_crtc *crtc;
  11588. struct drm_crtc_state *crtc_state;
  11589. int i;
  11590. if (!dev_priv->display.crtc_compute_clock)
  11591. return;
  11592. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11593. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11594. struct intel_shared_dpll *old_dpll =
  11595. to_intel_crtc_state(crtc->state)->shared_dpll;
  11596. if (!needs_modeset(crtc_state))
  11597. continue;
  11598. to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
  11599. if (!old_dpll)
  11600. continue;
  11601. if (!shared_dpll)
  11602. shared_dpll = intel_atomic_get_shared_dpll_state(state);
  11603. intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
  11604. }
  11605. }
  11606. /*
  11607. * This implements the workaround described in the "notes" section of the mode
  11608. * set sequence documentation. When going from no pipes or single pipe to
  11609. * multiple pipes, and planes are enabled after the pipe, we need to wait at
  11610. * least 2 vblanks on the first pipe before enabling planes on the second pipe.
  11611. */
  11612. static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
  11613. {
  11614. struct drm_crtc_state *crtc_state;
  11615. struct intel_crtc *intel_crtc;
  11616. struct drm_crtc *crtc;
  11617. struct intel_crtc_state *first_crtc_state = NULL;
  11618. struct intel_crtc_state *other_crtc_state = NULL;
  11619. enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
  11620. int i;
/* look at all crtcs that are going to be enabled during the modeset */
  11622. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11623. intel_crtc = to_intel_crtc(crtc);
  11624. if (!crtc_state->active || !needs_modeset(crtc_state))
  11625. continue;
  11626. if (first_crtc_state) {
  11627. other_crtc_state = to_intel_crtc_state(crtc_state);
  11628. break;
  11629. } else {
  11630. first_crtc_state = to_intel_crtc_state(crtc_state);
  11631. first_pipe = intel_crtc->pipe;
  11632. }
  11633. }
  11634. /* No workaround needed? */
  11635. if (!first_crtc_state)
  11636. return 0;
  11637. /* w/a possibly needed, check how many crtc's are already enabled. */
  11638. for_each_intel_crtc(state->dev, intel_crtc) {
  11639. struct intel_crtc_state *pipe_config;
  11640. pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
  11641. if (IS_ERR(pipe_config))
  11642. return PTR_ERR(pipe_config);
  11643. pipe_config->hsw_workaround_pipe = INVALID_PIPE;
  11644. if (!pipe_config->base.active ||
  11645. needs_modeset(&pipe_config->base))
  11646. continue;
  11647. /* 2 or more enabled crtcs means no need for w/a */
  11648. if (enabled_pipe != INVALID_PIPE)
  11649. return 0;
  11650. enabled_pipe = intel_crtc->pipe;
  11651. }
  11652. if (enabled_pipe != INVALID_PIPE)
  11653. first_crtc_state->hsw_workaround_pipe = enabled_pipe;
  11654. else if (other_crtc_state)
  11655. other_crtc_state->hsw_workaround_pipe = first_pipe;
  11656. return 0;
  11657. }
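/*
 * Add every currently active pipe to the atomic state and flag it for a
 * full modeset, pulling in its connectors and planes as well. Used when
 * a global resource such as cdclk changes and all pipes have to be
 * reprogrammed together.
 */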
  11658. static int intel_modeset_all_pipes(struct drm_atomic_state *state)
  11659. {
  11660. struct drm_crtc *crtc;
  11661. struct drm_crtc_state *crtc_state;
  11662. int ret = 0;
  11663. /* add all active pipes to the state */
  11664. for_each_crtc(state->dev, crtc) {
  11665. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  11666. if (IS_ERR(crtc_state))
  11667. return PTR_ERR(crtc_state);
  11668. if (!crtc_state->active || needs_modeset(crtc_state))
  11669. continue;
  11670. crtc_state->mode_changed = true;
  11671. ret = drm_atomic_add_affected_connectors(state, crtc);
  11672. if (ret)
  11673. break;
  11674. ret = drm_atomic_add_affected_planes(state, crtc);
  11675. if (ret)
  11676. break;
  11677. }
  11678. return ret;
  11679. }
  11680. static int intel_modeset_checks(struct drm_atomic_state *state)
  11681. {
  11682. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11683. struct drm_i915_private *dev_priv = to_i915(state->dev);
  11684. struct drm_crtc *crtc;
  11685. struct drm_crtc_state *crtc_state;
  11686. int ret = 0, i;
  11687. if (!check_digital_port_conflicts(state)) {
  11688. DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
  11689. return -EINVAL;
  11690. }
  11691. intel_state->modeset = true;
  11692. intel_state->active_crtcs = dev_priv->active_crtcs;
  11693. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11694. if (crtc_state->active)
  11695. intel_state->active_crtcs |= 1 << i;
  11696. else
  11697. intel_state->active_crtcs &= ~(1 << i);
  11698. if (crtc_state->active != crtc->state->active)
  11699. intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
  11700. }
  11701. /*
  11702. * See if the config requires any additional preparation, e.g.
  11703. * to adjust global state with pipes off. We need to do this
  11704. * here so we can get the modeset_pipe updated config for the new
  11705. * mode set on this crtc. For other crtcs we need to use the
  11706. * adjusted_mode bits in the crtc directly.
  11707. */
  11708. if (dev_priv->display.modeset_calc_cdclk) {
  11709. if (!intel_state->cdclk_pll_vco)
  11710. intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
  11711. if (!intel_state->cdclk_pll_vco)
  11712. intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
  11713. ret = dev_priv->display.modeset_calc_cdclk(state);
  11714. if (ret < 0)
  11715. return ret;
  11716. if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
  11717. intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
  11718. ret = intel_modeset_all_pipes(state);
  11719. if (ret < 0)
  11720. return ret;
  11721. DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
  11722. intel_state->cdclk, intel_state->dev_cdclk);
  11723. } else {
  11724. to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
  11725. }
  11726. intel_modeset_clear_plls(state);
  11727. if (IS_HASWELL(dev_priv))
  11728. return haswell_mode_set_planes_workaround(state);
  11729. return 0;
  11730. }
  11731. /*
  11732. * Handle calculation of various watermark data at the end of the atomic check
  11733. * phase. The code here should be run after the per-crtc and per-plane 'check'
  11734. * handlers to ensure that all derived state has been updated.
  11735. */
  11736. static int calc_watermark_data(struct drm_atomic_state *state)
  11737. {
  11738. struct drm_device *dev = state->dev;
  11739. struct drm_i915_private *dev_priv = to_i915(dev);
  11740. /* Is there platform-specific watermark information to calculate? */
  11741. if (dev_priv->display.compute_global_watermarks)
  11742. return dev_priv->display.compute_global_watermarks(state);
  11743. return 0;
  11744. }
  11745. /**
  11746. * intel_atomic_check - validate state object
  11747. * @dev: drm device
  11748. * @state: state to validate
  11749. */
  11750. static int intel_atomic_check(struct drm_device *dev,
  11751. struct drm_atomic_state *state)
  11752. {
  11753. struct drm_i915_private *dev_priv = to_i915(dev);
  11754. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11755. struct drm_crtc *crtc;
  11756. struct drm_crtc_state *crtc_state;
  11757. int ret, i;
  11758. bool any_ms = false;
  11759. ret = drm_atomic_helper_check_modeset(dev, state);
  11760. if (ret)
  11761. return ret;
  11762. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11763. struct intel_crtc_state *pipe_config =
  11764. to_intel_crtc_state(crtc_state);
  11765. /* Catch I915_MODE_FLAG_INHERITED */
  11766. if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
  11767. crtc_state->mode_changed = true;
  11768. if (!needs_modeset(crtc_state))
  11769. continue;
  11770. if (!crtc_state->enable) {
  11771. any_ms = true;
  11772. continue;
  11773. }
  11774. /* FIXME: For only active_changed we shouldn't need to do any
  11775. * state recomputation at all. */
  11776. ret = drm_atomic_add_affected_connectors(state, crtc);
  11777. if (ret)
  11778. return ret;
  11779. ret = intel_modeset_pipe_config(crtc, pipe_config);
  11780. if (ret) {
  11781. intel_dump_pipe_config(to_intel_crtc(crtc),
  11782. pipe_config, "[failed]");
  11783. return ret;
  11784. }
  11785. if (i915.fastboot &&
  11786. intel_pipe_config_compare(dev_priv,
  11787. to_intel_crtc_state(crtc->state),
  11788. pipe_config, true)) {
  11789. crtc_state->mode_changed = false;
  11790. to_intel_crtc_state(crtc_state)->update_pipe = true;
  11791. }
  11792. if (needs_modeset(crtc_state))
  11793. any_ms = true;
  11794. ret = drm_atomic_add_affected_planes(state, crtc);
  11795. if (ret)
  11796. return ret;
  11797. intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
  11798. needs_modeset(crtc_state) ?
  11799. "[modeset]" : "[fastset]");
  11800. }
  11801. if (any_ms) {
  11802. ret = intel_modeset_checks(state);
  11803. if (ret)
  11804. return ret;
  11805. } else {
  11806. intel_state->cdclk = dev_priv->atomic_cdclk_freq;
  11807. }
  11808. ret = drm_atomic_helper_check_planes(dev, state);
  11809. if (ret)
  11810. return ret;
  11811. intel_fbc_choose_crtc(dev_priv, state);
  11812. return calc_watermark_data(state);
  11813. }
  11814. static int intel_atomic_prepare_commit(struct drm_device *dev,
  11815. struct drm_atomic_state *state)
  11816. {
  11817. struct drm_i915_private *dev_priv = to_i915(dev);
  11818. struct drm_crtc_state *crtc_state;
  11819. struct drm_crtc *crtc;
  11820. int i, ret;
  11821. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11822. if (state->legacy_cursor_update)
  11823. continue;
  11824. ret = intel_crtc_wait_for_pending_flips(crtc);
  11825. if (ret)
  11826. return ret;
  11827. if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
  11828. flush_workqueue(dev_priv->wq);
  11829. }
  11830. ret = mutex_lock_interruptible(&dev->struct_mutex);
  11831. if (ret)
  11832. return ret;
  11833. ret = drm_atomic_helper_prepare_planes(dev, state);
  11834. mutex_unlock(&dev->struct_mutex);
  11835. return ret;
  11836. }
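/*
 * Return the current vblank counter for the crtc's pipe, falling back
 * to the software-accurate counter on platforms without a hardware
 * counter (dev->max_vblank_count == 0).
 */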
  11837. u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
  11838. {
  11839. struct drm_device *dev = crtc->base.dev;
  11840. if (!dev->max_vblank_count)
  11841. return drm_accurate_vblank_count(&crtc->base);
  11842. return dev->driver->get_vblank_counter(dev, crtc->pipe);
  11843. }
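/*
 * Wait for one vblank to pass on every pipe in @crtc_mask, by sampling
 * the vblank counter and then waiting (up to 50 ms per pipe) for it to
 * change. Pipes whose vblank reference cannot be taken are skipped.
 */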
  11844. static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
  11845. struct drm_i915_private *dev_priv,
  11846. unsigned crtc_mask)
  11847. {
  11848. unsigned last_vblank_count[I915_MAX_PIPES];
  11849. enum pipe pipe;
  11850. int ret;
  11851. if (!crtc_mask)
  11852. return;
  11853. for_each_pipe(dev_priv, pipe) {
  11854. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
  11855. pipe);
  11856. if (!((1 << pipe) & crtc_mask))
  11857. continue;
  11858. ret = drm_crtc_vblank_get(&crtc->base);
  11859. if (WARN_ON(ret != 0)) {
  11860. crtc_mask &= ~(1 << pipe);
  11861. continue;
  11862. }
  11863. last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
  11864. }
  11865. for_each_pipe(dev_priv, pipe) {
  11866. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
  11867. pipe);
  11868. long lret;
  11869. if (!((1 << pipe) & crtc_mask))
  11870. continue;
  11871. lret = wait_event_timeout(dev->vblank[pipe].queue,
  11872. last_vblank_count[pipe] !=
  11873. drm_crtc_vblank_count(&crtc->base),
  11874. msecs_to_jiffies(50));
  11875. WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
  11876. drm_crtc_vblank_put(&crtc->base);
  11877. }
  11878. }
  11879. static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
  11880. {
  11881. /* fb updated, need to unpin old fb */
  11882. if (crtc_state->fb_changed)
  11883. return true;
  11884. /* wm changes, need vblank before final wm's */
  11885. if (crtc_state->update_wm_post)
  11886. return true;
  11887. /*
  11888. * cxsr is re-enabled after vblank.
  11889. * This is already handled by crtc_state->update_wm_post,
  11890. * but added for clarity.
  11891. */
  11892. if (crtc_state->disable_cxsr)
  11893. return true;
  11894. return false;
  11895. }
  11896. static void intel_update_crtc(struct drm_crtc *crtc,
  11897. struct drm_atomic_state *state,
  11898. struct drm_crtc_state *old_crtc_state,
  11899. unsigned int *crtc_vblank_mask)
  11900. {
  11901. struct drm_device *dev = crtc->dev;
  11902. struct drm_i915_private *dev_priv = to_i915(dev);
  11903. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11904. struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
  11905. bool modeset = needs_modeset(crtc->state);
  11906. if (modeset) {
  11907. update_scanline_offset(intel_crtc);
  11908. dev_priv->display.crtc_enable(pipe_config, state);
  11909. } else {
  11910. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  11911. }
  11912. if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
  11913. intel_fbc_enable(
  11914. intel_crtc, pipe_config,
  11915. to_intel_plane_state(crtc->primary->state));
  11916. }
  11917. drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
  11918. if (needs_vblank_wait(pipe_config))
  11919. *crtc_vblank_mask |= drm_crtc_mask(crtc);
  11920. }
  11921. static void intel_update_crtcs(struct drm_atomic_state *state,
  11922. unsigned int *crtc_vblank_mask)
  11923. {
  11924. struct drm_crtc *crtc;
  11925. struct drm_crtc_state *old_crtc_state;
  11926. int i;
  11927. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11928. if (!crtc->state->active)
  11929. continue;
  11930. intel_update_crtc(crtc, state, old_crtc_state,
  11931. crtc_vblank_mask);
  11932. }
  11933. }
  11934. static void skl_update_crtcs(struct drm_atomic_state *state,
  11935. unsigned int *crtc_vblank_mask)
  11936. {
  11937. struct drm_i915_private *dev_priv = to_i915(state->dev);
  11938. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11939. struct drm_crtc *crtc;
  11940. struct intel_crtc *intel_crtc;
  11941. struct drm_crtc_state *old_crtc_state;
  11942. struct intel_crtc_state *cstate;
  11943. unsigned int updated = 0;
  11944. bool progress;
  11945. enum pipe pipe;
  11946. int i;
  11947. const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
  11948. for_each_crtc_in_state(state, crtc, old_crtc_state, i)
  11949. /* ignore allocations for crtc's that have been turned off. */
  11950. if (crtc->state->active)
  11951. entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
 * Whenever the number of active pipes changes, we need to make sure we
 * update the pipes in the right order so that their DDB allocations
 * never overlap with each other in between CRTC updates. Otherwise
 * we'll cause pipe underruns and other bad stuff.
 */
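/*
 * For example, if pipe A has to shrink its allocation to make room for
 * pipe B, pipe A must be reprogrammed (and, if needed, a vblank waited
 * for its new allocation to take hold) before pipe B may grow into the
 * freed space; the loop below keeps updating whichever pipes no longer
 * overlap anyone else's old allocation until every pipe is done.
 */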
  11958. do {
  11959. progress = false;
  11960. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11961. bool vbl_wait = false;
  11962. unsigned int cmask = drm_crtc_mask(crtc);
  11963. intel_crtc = to_intel_crtc(crtc);
  11964. cstate = to_intel_crtc_state(crtc->state);
  11965. pipe = intel_crtc->pipe;
  11966. if (updated & cmask || !cstate->base.active)
  11967. continue;
  11968. if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
  11969. continue;
  11970. updated |= cmask;
  11971. entries[i] = &cstate->wm.skl.ddb;
/*
 * If this is an already active pipe, its DDB changed,
 * and this isn't the last pipe that needs updating,
 * then we need to wait for a vblank to pass for the
 * new DDB allocation to take effect.
 */
  11978. if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
  11979. &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
  11980. !crtc->state->active_changed &&
  11981. intel_state->wm_results.dirty_pipes != updated)
  11982. vbl_wait = true;
  11983. intel_update_crtc(crtc, state, old_crtc_state,
  11984. crtc_vblank_mask);
  11985. if (vbl_wait)
  11986. intel_wait_for_vblank(dev_priv, pipe);
  11987. progress = true;
  11988. }
  11989. } while (progress);
  11990. }
  11991. static void intel_atomic_commit_tail(struct drm_atomic_state *state)
  11992. {
  11993. struct drm_device *dev = state->dev;
  11994. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11995. struct drm_i915_private *dev_priv = to_i915(dev);
  11996. struct drm_crtc_state *old_crtc_state;
  11997. struct drm_crtc *crtc;
  11998. struct intel_crtc_state *intel_cstate;
  11999. bool hw_check = intel_state->modeset;
  12000. unsigned long put_domains[I915_MAX_PIPES] = {};
  12001. unsigned crtc_vblank_mask = 0;
  12002. int i;
  12003. drm_atomic_helper_wait_for_dependencies(state);
  12004. if (intel_state->modeset)
  12005. intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
  12006. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12007. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  12008. if (needs_modeset(crtc->state) ||
  12009. to_intel_crtc_state(crtc->state)->update_pipe) {
  12010. hw_check = true;
  12011. put_domains[to_intel_crtc(crtc)->pipe] =
  12012. modeset_get_crtc_power_domains(crtc,
  12013. to_intel_crtc_state(crtc->state));
  12014. }
  12015. if (!needs_modeset(crtc->state))
  12016. continue;
  12017. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  12018. if (old_crtc_state->active) {
  12019. intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
  12020. dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
  12021. intel_crtc->active = false;
  12022. intel_fbc_disable(intel_crtc);
  12023. intel_disable_shared_dpll(intel_crtc);
  12024. /*
  12025. * Underruns don't always raise
  12026. * interrupts, so check manually.
  12027. */
  12028. intel_check_cpu_fifo_underruns(dev_priv);
  12029. intel_check_pch_fifo_underruns(dev_priv);
  12030. if (!crtc->state->active) {
  12031. /*
  12032. * Make sure we don't call initial_watermarks
  12033. * for ILK-style watermark updates.
  12034. */
  12035. if (dev_priv->display.atomic_update_watermarks)
  12036. dev_priv->display.initial_watermarks(intel_state,
  12037. to_intel_crtc_state(crtc->state));
  12038. else
  12039. intel_update_watermarks(intel_crtc);
  12040. }
  12041. }
  12042. }
/* Only after disabling all output pipelines that will be changed can we
 * update the output configuration. */
  12045. intel_modeset_update_crtc_state(state);
  12046. if (intel_state->modeset) {
  12047. drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
  12048. if (dev_priv->display.modeset_commit_cdclk &&
  12049. (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
  12050. intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
  12051. dev_priv->display.modeset_commit_cdclk(state);
/*
 * SKL workaround: bspec recommends we disable the SAGV when we
 * have more than one pipe enabled.
 */
  12056. if (!intel_can_enable_sagv(state))
  12057. intel_disable_sagv(dev_priv);
  12058. intel_modeset_verify_disabled(dev, state);
  12059. }
  12060. /* Complete the events for pipes that have now been disabled */
  12061. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12062. bool modeset = needs_modeset(crtc->state);
/* Complete events for now-disabled pipes here. */
  12064. if (modeset && !crtc->state->active && crtc->state->event) {
  12065. spin_lock_irq(&dev->event_lock);
  12066. drm_crtc_send_vblank_event(crtc, crtc->state->event);
  12067. spin_unlock_irq(&dev->event_lock);
  12068. crtc->state->event = NULL;
  12069. }
  12070. }
  12071. /* Now enable the clocks, plane, pipe, and connectors that we set up. */
  12072. dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 * already, but still need the state for the delayed optimization. To
 * fix this:
 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
 * - schedule that vblank worker _before_ calling hw_done
 * - at the start of commit_tail, cancel it _synchronously_
 * - switch over to the vblank wait helper in the core after that since
 *   we don't need our special handling any more.
 */
  12082. if (!state->legacy_cursor_update)
  12083. intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
  12084. /*
  12085. * Now that the vblank has passed, we can go ahead and program the
  12086. * optimal watermarks on platforms that need two-step watermark
  12087. * programming.
  12088. *
  12089. * TODO: Move this (and other cleanup) to an async worker eventually.
  12090. */
  12091. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12092. intel_cstate = to_intel_crtc_state(crtc->state);
  12093. if (dev_priv->display.optimize_watermarks)
  12094. dev_priv->display.optimize_watermarks(intel_state,
  12095. intel_cstate);
  12096. }
  12097. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  12098. intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
  12099. if (put_domains[i])
  12100. modeset_put_power_domains(dev_priv, put_domains[i]);
  12101. intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
  12102. }
  12103. if (intel_state->modeset && intel_can_enable_sagv(state))
  12104. intel_enable_sagv(dev_priv);
  12105. drm_atomic_helper_commit_hw_done(state);
  12106. if (intel_state->modeset)
  12107. intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
  12108. mutex_lock(&dev->struct_mutex);
  12109. drm_atomic_helper_cleanup_planes(dev, state);
  12110. mutex_unlock(&dev->struct_mutex);
  12111. drm_atomic_helper_commit_cleanup_done(state);
  12112. drm_atomic_state_put(state);
  12113. /* As one of the primary mmio accessors, KMS has a high likelihood
  12114. * of triggering bugs in unclaimed access. After we finish
  12115. * modesetting, see if an error has been flagged, and if so
  12116. * enable debugging for the next modeset - and hope we catch
  12117. * the culprit.
  12118. *
  12119. * XXX note that we assume display power is on at this point.
  12120. * This might hold true now but we need to add pm helper to check
  12121. * unclaimed only when the hardware is on, as atomic commits
  12122. * can happen also when the device is completely off.
  12123. */
  12124. intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
  12125. }
  12126. static void intel_atomic_commit_work(struct work_struct *work)
  12127. {
  12128. struct drm_atomic_state *state =
  12129. container_of(work, struct drm_atomic_state, commit_work);
  12130. intel_atomic_commit_tail(state);
  12131. }
  12132. static int __i915_sw_fence_call
  12133. intel_atomic_commit_ready(struct i915_sw_fence *fence,
  12134. enum i915_sw_fence_notify notify)
  12135. {
  12136. struct intel_atomic_state *state =
  12137. container_of(fence, struct intel_atomic_state, commit_ready);
  12138. switch (notify) {
  12139. case FENCE_COMPLETE:
  12140. if (state->base.commit_work.func)
  12141. queue_work(system_unbound_wq, &state->base.commit_work);
  12142. break;
  12143. case FENCE_FREE:
  12144. drm_atomic_state_put(&state->base);
  12145. break;
  12146. }
  12147. return NOTIFY_DONE;
  12148. }
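/*
 * Move the frontbuffer tracking bits of every plane in the state from
 * the old framebuffer object to the new one, so that frontbuffer
 * tracking follows the new framebuffer after the state swap.
 */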
  12149. static void intel_atomic_track_fbs(struct drm_atomic_state *state)
  12150. {
  12151. struct drm_plane_state *old_plane_state;
  12152. struct drm_plane *plane;
  12153. int i;
  12154. for_each_plane_in_state(state, plane, old_plane_state, i)
  12155. i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
  12156. intel_fb_obj(plane->state->fb),
  12157. to_intel_plane(plane)->frontbuffer_bit);
  12158. }
  12159. /**
  12160. * intel_atomic_commit - commit validated state object
  12161. * @dev: DRM device
  12162. * @state: the top-level driver state object
  12163. * @nonblock: nonblocking commit
  12164. *
  12165. * This function commits a top-level state object that has been validated
  12166. * with drm_atomic_helper_check().
  12167. *
  12168. * RETURNS
  12169. * Zero for success or -errno.
  12170. */
  12171. static int intel_atomic_commit(struct drm_device *dev,
  12172. struct drm_atomic_state *state,
  12173. bool nonblock)
  12174. {
  12175. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  12176. struct drm_i915_private *dev_priv = to_i915(dev);
  12177. int ret = 0;
  12178. ret = drm_atomic_helper_setup_commit(state, nonblock);
  12179. if (ret)
  12180. return ret;
  12181. drm_atomic_state_get(state);
  12182. i915_sw_fence_init(&intel_state->commit_ready,
  12183. intel_atomic_commit_ready);
  12184. ret = intel_atomic_prepare_commit(dev, state);
  12185. if (ret) {
  12186. DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
  12187. i915_sw_fence_commit(&intel_state->commit_ready);
  12188. return ret;
  12189. }
  12190. drm_atomic_helper_swap_state(state, true);
  12191. dev_priv->wm.distrust_bios_wm = false;
  12192. intel_shared_dpll_commit(state);
  12193. intel_atomic_track_fbs(state);
  12194. if (intel_state->modeset) {
  12195. memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
  12196. sizeof(intel_state->min_pixclk));
  12197. dev_priv->active_crtcs = intel_state->active_crtcs;
  12198. dev_priv->atomic_cdclk_freq = intel_state->cdclk;
  12199. }
  12200. drm_atomic_state_get(state);
  12201. INIT_WORK(&state->commit_work,
  12202. nonblock ? intel_atomic_commit_work : NULL);
  12203. i915_sw_fence_commit(&intel_state->commit_ready);
  12204. if (!nonblock) {
  12205. i915_sw_fence_wait(&intel_state->commit_ready);
  12206. intel_atomic_commit_tail(state);
  12207. }
  12208. return 0;
  12209. }
  12210. void intel_crtc_restore_mode(struct drm_crtc *crtc)
  12211. {
  12212. struct drm_device *dev = crtc->dev;
  12213. struct drm_atomic_state *state;
  12214. struct drm_crtc_state *crtc_state;
  12215. int ret;
  12216. state = drm_atomic_state_alloc(dev);
  12217. if (!state) {
  12218. DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
  12219. crtc->base.id, crtc->name);
  12220. return;
  12221. }
  12222. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  12223. retry:
  12224. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  12225. ret = PTR_ERR_OR_ZERO(crtc_state);
  12226. if (!ret) {
  12227. if (!crtc_state->active)
  12228. goto out;
  12229. crtc_state->mode_changed = true;
  12230. ret = drm_atomic_commit(state);
  12231. }
  12232. if (ret == -EDEADLK) {
  12233. drm_atomic_state_clear(state);
  12234. drm_modeset_backoff(state->acquire_ctx);
  12235. goto retry;
  12236. }
  12237. out:
  12238. drm_atomic_state_put(state);
  12239. }
  12240. /*
  12241. * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  12242. * drm_atomic_helper_legacy_gamma_set() directly.
  12243. */
  12244. static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
  12245. u16 *red, u16 *green, u16 *blue,
  12246. uint32_t size)
  12247. {
  12248. struct drm_device *dev = crtc->dev;
  12249. struct drm_mode_config *config = &dev->mode_config;
  12250. struct drm_crtc_state *state;
  12251. int ret;
  12252. ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
  12253. if (ret)
  12254. return ret;
  12255. /*
  12256. * Make sure we update the legacy properties so this works when
  12257. * atomic is not enabled.
  12258. */
  12259. state = crtc->state;
  12260. drm_object_property_set_value(&crtc->base,
  12261. config->degamma_lut_property,
  12262. (state->degamma_lut) ?
  12263. state->degamma_lut->base.id : 0);
  12264. drm_object_property_set_value(&crtc->base,
  12265. config->ctm_property,
  12266. (state->ctm) ?
  12267. state->ctm->base.id : 0);
  12268. drm_object_property_set_value(&crtc->base,
  12269. config->gamma_lut_property,
  12270. (state->gamma_lut) ?
  12271. state->gamma_lut->base.id : 0);
  12272. return 0;
  12273. }
  12274. static const struct drm_crtc_funcs intel_crtc_funcs = {
  12275. .gamma_set = intel_atomic_legacy_gamma_set,
  12276. .set_config = drm_atomic_helper_set_config,
  12277. .set_property = drm_atomic_helper_crtc_set_property,
  12278. .destroy = intel_crtc_destroy,
  12279. .page_flip = intel_crtc_page_flip,
  12280. .atomic_duplicate_state = intel_crtc_duplicate_state,
  12281. .atomic_destroy_state = intel_crtc_destroy_state,
  12282. };
  12283. /**
  12284. * intel_prepare_plane_fb - Prepare fb for usage on plane
  12285. * @plane: drm plane to prepare for
 * @new_state: the new plane state holding the framebuffer to prepare
  12287. *
  12288. * Prepares a framebuffer for usage on a display plane. Generally this
  12289. * involves pinning the underlying object and updating the frontbuffer tracking
  12290. * bits. Some older platforms need special physical address handling for
  12291. * cursor planes.
  12292. *
  12293. * Must be called with struct_mutex held.
  12294. *
  12295. * Returns 0 on success, negative error code on failure.
  12296. */
  12297. int
  12298. intel_prepare_plane_fb(struct drm_plane *plane,
  12299. struct drm_plane_state *new_state)
  12300. {
  12301. struct intel_atomic_state *intel_state =
  12302. to_intel_atomic_state(new_state->state);
  12303. struct drm_i915_private *dev_priv = to_i915(plane->dev);
  12304. struct drm_framebuffer *fb = new_state->fb;
  12305. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  12306. struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
  12307. int ret;
  12308. if (!obj && !old_obj)
  12309. return 0;
  12310. if (old_obj) {
  12311. struct drm_crtc_state *crtc_state =
  12312. drm_atomic_get_existing_crtc_state(new_state->state,
  12313. plane->state->crtc);
  12314. /* Big Hammer, we also need to ensure that any pending
  12315. * MI_WAIT_FOR_EVENT inside a user batch buffer on the
  12316. * current scanout is retired before unpinning the old
  12317. * framebuffer. Note that we rely on userspace rendering
  12318. * into the buffer attached to the pipe they are waiting
  12319. * on. If not, userspace generates a GPU hang with IPEHR
  12320. * point to the MI_WAIT_FOR_EVENT.
  12321. *
  12322. * This should only fail upon a hung GPU, in which case we
  12323. * can safely continue.
  12324. */
  12325. if (needs_modeset(crtc_state)) {
  12326. ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
  12327. old_obj->resv, NULL,
  12328. false, 0,
  12329. GFP_KERNEL);
  12330. if (ret < 0)
  12331. return ret;
  12332. }
  12333. }
  12334. if (new_state->fence) { /* explicit fencing */
  12335. ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
  12336. new_state->fence,
  12337. I915_FENCE_TIMEOUT,
  12338. GFP_KERNEL);
  12339. if (ret < 0)
  12340. return ret;
  12341. }
  12342. if (!obj)
  12343. return 0;
  12344. if (!new_state->fence) { /* implicit fencing */
  12345. ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
  12346. obj->resv, NULL,
  12347. false, I915_FENCE_TIMEOUT,
  12348. GFP_KERNEL);
  12349. if (ret < 0)
  12350. return ret;
  12351. i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
  12352. }
  12353. if (plane->type == DRM_PLANE_TYPE_CURSOR &&
  12354. INTEL_INFO(dev_priv)->cursor_needs_physical) {
  12355. int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
  12356. ret = i915_gem_object_attach_phys(obj, align);
  12357. if (ret) {
  12358. DRM_DEBUG_KMS("failed to attach phys object\n");
  12359. return ret;
  12360. }
  12361. } else {
  12362. struct i915_vma *vma;
  12363. vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
  12364. if (IS_ERR(vma)) {
  12365. DRM_DEBUG_KMS("failed to pin object\n");
  12366. return PTR_ERR(vma);
  12367. }
  12368. }
  12369. return 0;
  12370. }
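
/*
 * Note: for the new framebuffer either the user-supplied fence (explicit
 * fencing) or the fences in the object's reservation (implicit fencing) are
 * awaited, and in the implicit case the rendering is also bumped to display
 * priority. The waits are only queued on intel_state->commit_ready here;
 * the actual blocking happens later when the commit work is gated on that
 * sw fence.
 */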

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the plane state being cleaned up (carries the old framebuffer)
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
}

int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * The SKL max scale is the lower of:
	 *    close to 3, but not 3 (the -1 is for that purpose), or
	 *    cdclk/crtc_clock.
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}
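
/*
 * Note: max_scale is a 16.16 fixed-point downscale ratio. As an illustrative
 * example (numbers made up), with cdclk = 540000 kHz and
 * crtc_clock = 148500 kHz the second term is
 * (1 << 8) * ((540000 << 8) / 148500) = 238080 (~3.6x), so the result is
 * clamped to (1 << 16) * 3 - 1 = 196607, i.e. just under 3x.
 */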

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   min_scale, max_scale,
					   can_position, true);
	if (ret)
		return ret;

	if (!state->base.fb)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(state);
		if (ret)
			return ret;
	}

	return 0;
}
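
/*
 * Note: the scale limits passed above are 16.16 fixed-point src/dst ratios,
 * so min_scale = 1 effectively allows unlimited upscaling while max_scale
 * from skl_max_scale() caps downscaling; on pre-gen9 both stay at
 * DRM_PLANE_HELPER_NO_SCALING, i.e. 1:1 only.
 */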

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}

static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};

static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->plane = (enum plane) !pipe;
	else
		primary->plane = (enum plane) pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180 |
			DRM_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180;
	} else {
		supported_rotations = DRM_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
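
/*
 * Note: the legacy plane names above reflect the hardware plane, not the
 * pipe. On gen2/3 with FBC the mapping is swapped (plane A feeds pipe B),
 * so e.g. pipe B's primary plane is still reported as "plane A".
 */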

static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   DRM_PLANE_HELPER_NO_SCALING,
					   DRM_PLANE_HELPER_NO_SCALING,
					   true, true);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
			    state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}
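
	/*
	 * Note: cursor strides are padded to a power-of-two width times
	 * 4 bytes per pixel, e.g. a 64x64 ARGB cursor needs a 256 byte
	 * stride and therefore at least 256 * 64 = 16 KiB of backing
	 * storage; the check above rejects anything smaller.
	 */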
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
	    state->base.visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}

static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_addr = 0;
	intel_crtc_update_cursor(crtc, NULL);
}

static void
intel_update_cursor_plane(struct drm_plane *plane,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
		addr = i915_gem_object_ggtt_offset(obj, NULL);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc_update_cursor(crtc, state);
}
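
/*
 * Note: on most platforms the cursor address handed to
 * intel_crtc_update_cursor() above is a GGTT offset; only the old platforms
 * flagged with cursor_needs_physical (which take a bus address from the phys
 * handle) and the "no fb" disable case differ, mirroring the pinning choices
 * made in intel_prepare_plane_fb().
 */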

static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_ROTATE_0,
						   DRM_ROTATE_0 |
						   DRM_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}

static void skl_init_scalers(struct drm_i915_private *dev_priv,
			     struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	for (i = 0; i < crtc->num_scalers; i++) {
		struct intel_scaler *scaler = &scaler_state->scalers[i];

		scaler->in_use = 0;
		scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}

static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev_priv, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;
	intel_crtc->plane = primary->plane;

	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}

enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

static bool has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}
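
/*
 * Note: PP_CONTROL carries a write-protect key in its high bits; rewriting
 * it with PANEL_UNLOCK_REGS (the "unlocked" key, defined alongside
 * PANEL_UNLOCK_MASK in i915_reg.h) keeps the rest of the register intact
 * while allowing later panel power sequencer writes.
 */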

static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}

static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev);

	if (IS_BROXTON(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev);

	intel_psr_init(dev);

	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	i915_gem_object_put(intel_fb->obj);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	if (obj->pin_display && obj->cache_dirty)
		i915_gem_clflush_object(obj, true);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
			 uint64_t fb_modifier, uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev_priv)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
		   !IS_CHERRYVIEW(dev_priv)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}
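
/*
 * Note: for gen9 the two limits above coincide for 32bpp formats
 * (8192 pixels * 4 bytes = 32768 bytes), while e.g. RGB565 (cpp = 2)
 * ends up pixel-limited at 16384 bytes.
 */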

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int tiling = i915_gem_object_get_tiling(obj);
	int ret;
	u32 pitch_limit, stride_alignment;
	struct drm_format_name_buf format_name;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_INFO(dev_priv)->gen < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE &&
	    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0],
			  i915_gem_object_get_stride(obj));
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_GEN(dev_priv) > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_GEN(dev_priv) < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_GEN(dev_priv) < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
	if (ret)
		return ret;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (IS_BROADWELL(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broadwell_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broadwell_modeset_calc_cdclk;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			bxt_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			bxt_modeset_calc_cdclk;
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			skl_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			skl_modeset_calc_cdclk;
	}

	if (dev_priv->info.gen >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;
	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;
	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev_priv);
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
  13739. int intel_modeset_init(struct drm_device *dev)
  13740. {
  13741. struct drm_i915_private *dev_priv = to_i915(dev);
  13742. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  13743. enum pipe pipe;
  13744. struct intel_crtc *crtc;
  13745. drm_mode_config_init(dev);
  13746. dev->mode_config.min_width = 0;
  13747. dev->mode_config.min_height = 0;
  13748. dev->mode_config.preferred_depth = 24;
  13749. dev->mode_config.prefer_shadow = 1;
  13750. dev->mode_config.allow_fb_modifiers = true;
  13751. dev->mode_config.funcs = &intel_mode_funcs;
  13752. intel_init_quirks(dev);
  13753. intel_init_pm(dev_priv);
  13754. if (INTEL_INFO(dev_priv)->num_pipes == 0)
  13755. return 0;
  13756. /*
  13757. * There may be no VBT; and if the BIOS enabled SSC we can
  13758. * just keep using it to avoid unnecessary flicker. Whereas if the
  13759. * BIOS isn't using it, don't assume it will work even if the VBT
  13760. * indicates as much.
  13761. */
  13762. if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
  13763. bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
  13764. DREF_SSC1_ENABLE);
  13765. if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
  13766. DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
  13767. bios_lvds_use_ssc ? "en" : "dis",
  13768. dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
  13769. dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
  13770. }
  13771. }
  13772. if (IS_GEN2(dev_priv)) {
  13773. dev->mode_config.max_width = 2048;
  13774. dev->mode_config.max_height = 2048;
  13775. } else if (IS_GEN3(dev_priv)) {
  13776. dev->mode_config.max_width = 4096;
  13777. dev->mode_config.max_height = 4096;
  13778. } else {
  13779. dev->mode_config.max_width = 8192;
  13780. dev->mode_config.max_height = 8192;
  13781. }
  13782. if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
  13783. dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
  13784. dev->mode_config.cursor_height = 1023;
  13785. } else if (IS_GEN2(dev_priv)) {
  13786. dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
  13787. dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
  13788. } else {
  13789. dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
  13790. dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
  13791. }
  13792. dev->mode_config.fb_base = ggtt->mappable_base;
  13793. DRM_DEBUG_KMS("%d display pipe%s available.\n",
  13794. INTEL_INFO(dev_priv)->num_pipes,
  13795. INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
  13796. for_each_pipe(dev_priv, pipe) {
  13797. int ret;
  13798. ret = intel_crtc_init(dev_priv, pipe);
  13799. if (ret) {
  13800. drm_mode_config_cleanup(dev);
  13801. return ret;
  13802. }
  13803. }
  13804. intel_update_czclk(dev_priv);
  13805. intel_update_cdclk(dev_priv);
  13806. intel_shared_dpll_init(dev);
  13807. if (dev_priv->max_cdclk_freq == 0)
  13808. intel_update_max_cdclk(dev_priv);
  13809. /* Just disable it once at startup */
  13810. i915_disable_vga(dev_priv);
  13811. intel_setup_outputs(dev);
  13812. drm_modeset_lock_all(dev);
  13813. intel_modeset_setup_hw_state(dev);
  13814. drm_modeset_unlock_all(dev);
  13815. for_each_intel_crtc(dev, crtc) {
  13816. struct intel_initial_plane_config plane_config = {};
  13817. if (!crtc->active)
  13818. continue;
  13819. /*
  13820. * Note that reserving the BIOS fb up front prevents us
  13821. * from stuffing other stolen allocations like the ring
  13822. * on top. This prevents some ugliness at boot time, and
  13823. * can even allow for smooth boot transitions if the BIOS
  13824. * fb is large enough for the active pipe configuration.
  13825. */
  13826. dev_priv->display.get_initial_plane_config(crtc,
  13827. &plane_config);
  13828. /*
  13829. * If the fb is shared between multiple heads, we'll
  13830. * just get the first one.
  13831. */
  13832. intel_find_initial_plane_obj(crtc, &plane_config);
  13833. }
  13834. /*
  13835. * Make sure hardware watermarks really match the state we read out.
  13836. * Note that we need to do this after reconstructing the BIOS fb's
  13837. * since the watermark calculation done here will use pstate->fb.
  13838. */
  13839. sanitize_watermarks(dev);
  13840. return 0;
  13841. }
  13842. static void intel_enable_pipe_a(struct drm_device *dev)
  13843. {
  13844. struct intel_connector *connector;
  13845. struct drm_connector *crt = NULL;
  13846. struct intel_load_detect_pipe load_detect_temp;
  13847. struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
  13848. /* We can't just switch on the pipe A, we need to set things up with a
  13849. * proper mode and output configuration. As a gross hack, enable pipe A
  13850. * by enabling the load detect pipe once. */
  13851. for_each_intel_connector(dev, connector) {
  13852. if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
  13853. crt = &connector->base;
  13854. break;
  13855. }
  13856. }
  13857. if (!crt)
  13858. return;
  13859. if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
  13860. intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
  13861. }
  13862. static bool
  13863. intel_check_plane_mapping(struct intel_crtc *crtc)
  13864. {
  13865. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  13866. u32 val;
  13867. if (INTEL_INFO(dev_priv)->num_pipes == 1)
  13868. return true;
  13869. val = I915_READ(DSPCNTR(!crtc->plane));
  13870. if ((val & DISPLAY_PLANE_ENABLE) &&
  13871. (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
  13872. return false;
  13873. return true;
  13874. }
  13875. static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
  13876. {
  13877. struct drm_device *dev = crtc->base.dev;
  13878. struct intel_encoder *encoder;
  13879. for_each_encoder_on_crtc(dev, &crtc->base, encoder)
  13880. return true;
  13881. return false;
  13882. }
  13883. static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
  13884. {
  13885. struct drm_device *dev = encoder->base.dev;
  13886. struct intel_connector *connector;
  13887. for_each_connector_on_encoder(dev, &encoder->base, connector)
  13888. return connector;
  13889. return NULL;
  13890. }
  13891. static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
  13892. enum transcoder pch_transcoder)
  13893. {
  13894. return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
  13895. (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
  13896. }
  13897. static void intel_sanitize_crtc(struct intel_crtc *crtc)
  13898. {
  13899. struct drm_device *dev = crtc->base.dev;
  13900. struct drm_i915_private *dev_priv = to_i915(dev);
  13901. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  13902. /* Clear any frame start delays used for debugging left by the BIOS */
  13903. if (!transcoder_is_dsi(cpu_transcoder)) {
  13904. i915_reg_t reg = PIPECONF(cpu_transcoder);
  13905. I915_WRITE(reg,
  13906. I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
  13907. }
  13908. /* restore vblank interrupts to correct state */
  13909. drm_crtc_vblank_reset(&crtc->base);
  13910. if (crtc->active) {
  13911. struct intel_plane *plane;
  13912. drm_crtc_vblank_on(&crtc->base);
  13913. /* Disable everything but the primary plane */
  13914. for_each_intel_plane_on_crtc(dev, crtc, plane) {
  13915. if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
  13916. continue;
  13917. plane->disable_plane(&plane->base, &crtc->base);
  13918. }
  13919. }
  13920. /* We need to sanitize the plane -> pipe mapping first because this will
  13921. * disable the crtc (and hence change the state) if it is wrong. Note
  13922. * that gen4+ has a fixed plane -> pipe mapping. */
  13923. if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
  13924. bool plane;
  13925. DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
  13926. crtc->base.base.id, crtc->base.name);
  13927. /* Pipe has the wrong plane attached and the plane is active.
  13928. * Temporarily change the plane mapping and disable everything
  13929. * ... */
  13930. plane = crtc->plane;
  13931. to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
  13932. crtc->plane = !plane;
  13933. intel_crtc_disable_noatomic(&crtc->base);
  13934. crtc->plane = plane;
  13935. }
  13936. if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
  13937. crtc->pipe == PIPE_A && !crtc->active) {
  13938. /* BIOS forgot to enable pipe A, this mostly happens after
  13939. * resume. Force-enable the pipe to fix this, the update_dpms
  13940. * call below we restore the pipe to the right state, but leave
  13941. * the required bits on. */
  13942. intel_enable_pipe_a(dev);
  13943. }
  13944. /* Adjust the state of the output pipe according to whether we
  13945. * have active connectors/encoders. */
  13946. if (crtc->active && !intel_crtc_has_encoders(crtc))
  13947. intel_crtc_disable_noatomic(&crtc->base);
  13948. if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
  13949. /*
  13950. * We start out with underrun reporting disabled to avoid races.
  13951. * For correct bookkeeping mark this on active crtcs.
  13952. *
  13953. * Also on gmch platforms we dont have any hardware bits to
  13954. * disable the underrun reporting. Which means we need to start
  13955. * out with underrun reporting disabled also on inactive pipes,
  13956. * since otherwise we'll complain about the garbage we read when
  13957. * e.g. coming up after runtime pm.
  13958. *
  13959. * No protection against concurrent access is required - at
  13960. * worst a fifo underrun happens which also sets this to false.
  13961. */
  13962. crtc->cpu_fifo_underrun_disabled = true;
  13963. /*
  13964. * We track the PCH trancoder underrun reporting state
  13965. * within the crtc. With crtc for pipe A housing the underrun
  13966. * reporting state for PCH transcoder A, crtc for pipe B housing
  13967. * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
  13968. * and marking underrun reporting as disabled for the non-existing
  13969. * PCH transcoders B and C would prevent enabling the south
  13970. * error interrupt (see cpt_can_enable_serr_int()).
  13971. */
  13972. if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
  13973. crtc->pch_fifo_underrun_disabled = true;
  13974. }
  13975. }
  13976. static void intel_sanitize_encoder(struct intel_encoder *encoder)
  13977. {
  13978. struct intel_connector *connector;
  13979. /* We need to check both for a crtc link (meaning that the
  13980. * encoder is active and trying to read from a pipe) and the
  13981. * pipe itself being active. */
  13982. bool has_active_crtc = encoder->base.crtc &&
  13983. to_intel_crtc(encoder->base.crtc)->active;
  13984. connector = intel_encoder_find_connector(encoder);
  13985. if (connector && !has_active_crtc) {
  13986. DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
  13987. encoder->base.base.id,
  13988. encoder->base.name);
  13989. /* Connector is active, but has no active pipe. This is
  13990. * fallout from our resume register restoring. Disable
  13991. * the encoder manually again. */
  13992. if (encoder->base.crtc) {
  13993. struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
  13994. DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
  13995. encoder->base.base.id,
  13996. encoder->base.name);
  13997. encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
  13998. if (encoder->post_disable)
  13999. encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
  14000. }
  14001. encoder->base.crtc = NULL;
  14002. /* Inconsistent output/port/pipe state happens presumably due to
  14003. * a bug in one of the get_hw_state functions. Or someplace else
  14004. * in our code, like the register restore mess on resume. Clamp
  14005. * things to off as a safer default. */
  14006. connector->base.dpms = DRM_MODE_DPMS_OFF;
  14007. connector->base.encoder = NULL;
  14008. }
  14009. /* Enabled encoders without active connectors will be fixed in
  14010. * the crtc fixup. */
  14011. }
  14012. void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
  14013. {
  14014. i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
  14015. if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
  14016. DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
  14017. i915_disable_vga(dev_priv);
  14018. }
  14019. }
  14020. void i915_redisable_vga(struct drm_i915_private *dev_priv)
  14021. {
  14022. /* This function can be called both from intel_modeset_setup_hw_state or
  14023. * at a very early point in our resume sequence, where the power well
  14024. * structures are not yet restored. Since this function is at a very
  14025. * paranoid "someone might have enabled VGA while we were not looking"
  14026. * level, just check if the power well is enabled instead of trying to
  14027. * follow the "don't touch the power well if we don't need it" policy
  14028. * the rest of the driver uses. */
  14029. if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
  14030. return;
  14031. i915_redisable_vga_power_on(dev_priv);
  14032. intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
  14033. }
  14034. static bool primary_get_hw_state(struct intel_plane *plane)
  14035. {
  14036. struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
  14037. return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
  14038. }
  14039. /* FIXME read out full plane state for all planes */
  14040. static void readout_plane_state(struct intel_crtc *crtc)
  14041. {
  14042. struct drm_plane *primary = crtc->base.primary;
  14043. struct intel_plane_state *plane_state =
  14044. to_intel_plane_state(primary->state);
  14045. plane_state->base.visible = crtc->active &&
  14046. primary_get_hw_state(to_intel_plane(primary));
  14047. if (plane_state->base.visible)
  14048. crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
  14049. }
  14050. static void intel_modeset_readout_hw_state(struct drm_device *dev)
  14051. {
  14052. struct drm_i915_private *dev_priv = to_i915(dev);
  14053. enum pipe pipe;
  14054. struct intel_crtc *crtc;
  14055. struct intel_encoder *encoder;
  14056. struct intel_connector *connector;
  14057. int i;
  14058. dev_priv->active_crtcs = 0;
  14059. for_each_intel_crtc(dev, crtc) {
  14060. struct intel_crtc_state *crtc_state = crtc->config;
  14061. int pixclk = 0;
  14062. __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
  14063. memset(crtc_state, 0, sizeof(*crtc_state));
  14064. crtc_state->base.crtc = &crtc->base;
  14065. crtc_state->base.active = crtc_state->base.enable =
  14066. dev_priv->display.get_pipe_config(crtc, crtc_state);
  14067. crtc->base.enabled = crtc_state->base.enable;
  14068. crtc->active = crtc_state->base.active;
  14069. if (crtc_state->base.active) {
  14070. dev_priv->active_crtcs |= 1 << crtc->pipe;
  14071. if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
  14072. pixclk = ilk_pipe_pixel_rate(crtc_state);
  14073. else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  14074. pixclk = crtc_state->base.adjusted_mode.crtc_clock;
  14075. else
  14076. WARN_ON(dev_priv->display.modeset_calc_cdclk);
  14077. /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
  14078. if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
  14079. pixclk = DIV_ROUND_UP(pixclk * 100, 95);
  14080. }
  14081. dev_priv->min_pixclk[crtc->pipe] = pixclk;
  14082. readout_plane_state(crtc);
  14083. DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
  14084. crtc->base.base.id, crtc->base.name,
  14085. enableddisabled(crtc->active));
  14086. }
  14087. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  14088. struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
  14089. pll->on = pll->funcs.get_hw_state(dev_priv, pll,
  14090. &pll->config.hw_state);
  14091. pll->config.crtc_mask = 0;
  14092. for_each_intel_crtc(dev, crtc) {
  14093. if (crtc->active && crtc->config->shared_dpll == pll)
  14094. pll->config.crtc_mask |= 1 << crtc->pipe;
  14095. }
  14096. pll->active_mask = pll->config.crtc_mask;
  14097. DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
  14098. pll->name, pll->config.crtc_mask, pll->on);
  14099. }
  14100. for_each_intel_encoder(dev, encoder) {
  14101. pipe = 0;
  14102. if (encoder->get_hw_state(encoder, &pipe)) {
  14103. crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  14104. encoder->base.crtc = &crtc->base;
  14105. crtc->config->output_types |= 1 << encoder->type;
  14106. encoder->get_config(encoder, crtc->config);
  14107. } else {
  14108. encoder->base.crtc = NULL;
  14109. }
  14110. DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
  14111. encoder->base.base.id, encoder->base.name,
  14112. enableddisabled(encoder->base.crtc),
  14113. pipe_name(pipe));
  14114. }
  14115. for_each_intel_connector(dev, connector) {
  14116. if (connector->get_hw_state(connector)) {
  14117. connector->base.dpms = DRM_MODE_DPMS_ON;
  14118. encoder = connector->encoder;
  14119. connector->base.encoder = &encoder->base;
  14120. if (encoder->base.crtc &&
  14121. encoder->base.crtc->state->active) {
  14122. /*
  14123. * This has to be done during hardware readout
  14124. * because anything calling .crtc_disable may
  14125. * rely on the connector_mask being accurate.
  14126. */
  14127. encoder->base.crtc->state->connector_mask |=
  14128. 1 << drm_connector_index(&connector->base);
  14129. encoder->base.crtc->state->encoder_mask |=
  14130. 1 << drm_encoder_index(&encoder->base);
  14131. }
  14132. } else {
  14133. connector->base.dpms = DRM_MODE_DPMS_OFF;
  14134. connector->base.encoder = NULL;
  14135. }
  14136. DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
  14137. connector->base.base.id, connector->base.name,
  14138. enableddisabled(connector->base.encoder));
  14139. }
  14140. for_each_intel_crtc(dev, crtc) {
  14141. crtc->base.hwmode = crtc->config->base.adjusted_mode;
  14142. memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
  14143. if (crtc->base.state->active) {
  14144. intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
  14145. intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
  14146. WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
  14147. /*
  14148. * The initial mode needs to be set in order to keep
  14149. * the atomic core happy. It wants a valid mode if the
  14150. * crtc's enabled, so we do the above call.
  14151. *
  14152. * At this point some state updated by the connectors
  14153. * in their ->detect() callback has not run yet, so
  14154. * no recalculation can be done yet.
  14155. *
  14156. * Even if we could do a recalculation and modeset
  14157. * right now it would cause a double modeset if
  14158. * fbdev or userspace chooses a different initial mode.
  14159. *
  14160. * If that happens, someone indicated they wanted a
  14161. * mode change, which means it's safe to do a full
  14162. * recalculation.
  14163. */
  14164. crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
  14165. drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
  14166. update_scanline_offset(crtc);
  14167. }
  14168. intel_pipe_config_sanity_check(dev_priv, crtc->config);
  14169. }
  14170. }
  14171. /* Scan out the current hw modeset state,
  14172. * and sanitizes it to the current state
  14173. */
  14174. static void
  14175. intel_modeset_setup_hw_state(struct drm_device *dev)
  14176. {
  14177. struct drm_i915_private *dev_priv = to_i915(dev);
  14178. enum pipe pipe;
  14179. struct intel_crtc *crtc;
  14180. struct intel_encoder *encoder;
  14181. int i;
  14182. intel_modeset_readout_hw_state(dev);
  14183. /* HW state is read out, now we need to sanitize this mess. */
  14184. for_each_intel_encoder(dev, encoder) {
  14185. intel_sanitize_encoder(encoder);
  14186. }
  14187. for_each_pipe(dev_priv, pipe) {
  14188. crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  14189. intel_sanitize_crtc(crtc);
  14190. intel_dump_pipe_config(crtc, crtc->config,
  14191. "[setup_hw_state]");
  14192. }
  14193. intel_modeset_update_connector_atomic_state(dev);
  14194. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  14195. struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
  14196. if (!pll->on || pll->active_mask)
  14197. continue;
  14198. DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
  14199. pll->funcs.disable(dev_priv, pll);
  14200. pll->on = false;
  14201. }
  14202. if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  14203. vlv_wm_get_hw_state(dev);
  14204. else if (IS_GEN9(dev_priv))
  14205. skl_wm_get_hw_state(dev);
  14206. else if (HAS_PCH_SPLIT(dev_priv))
  14207. ilk_wm_get_hw_state(dev);
  14208. for_each_intel_crtc(dev, crtc) {
  14209. unsigned long put_domains;
  14210. put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
  14211. if (WARN_ON(put_domains))
  14212. modeset_put_power_domains(dev_priv, put_domains);
  14213. }
  14214. intel_display_set_init_power(dev_priv, false);
  14215. intel_fbc_init_pipe_state(dev_priv);
  14216. }
  14217. void intel_display_resume(struct drm_device *dev)
  14218. {
  14219. struct drm_i915_private *dev_priv = to_i915(dev);
  14220. struct drm_atomic_state *state = dev_priv->modeset_restore_state;
  14221. struct drm_modeset_acquire_ctx ctx;
  14222. int ret;
  14223. dev_priv->modeset_restore_state = NULL;
  14224. if (state)
  14225. state->acquire_ctx = &ctx;
  14226. /*
  14227. * This is a cludge because with real atomic modeset mode_config.mutex
  14228. * won't be taken. Unfortunately some probed state like
  14229. * audio_codec_enable is still protected by mode_config.mutex, so lock
  14230. * it here for now.
  14231. */
  14232. mutex_lock(&dev->mode_config.mutex);
  14233. drm_modeset_acquire_init(&ctx, 0);
  14234. while (1) {
  14235. ret = drm_modeset_lock_all_ctx(dev, &ctx);
  14236. if (ret != -EDEADLK)
  14237. break;
  14238. drm_modeset_backoff(&ctx);
  14239. }
  14240. if (!ret)
  14241. ret = __intel_display_resume(dev, state);
  14242. drm_modeset_drop_locks(&ctx);
  14243. drm_modeset_acquire_fini(&ctx);
  14244. mutex_unlock(&dev->mode_config.mutex);
  14245. if (ret)
  14246. DRM_ERROR("Restoring old state failed with %i\n", ret);
  14247. drm_atomic_state_put(state);
  14248. }
  14249. void intel_modeset_gem_init(struct drm_device *dev)
  14250. {
  14251. struct drm_i915_private *dev_priv = to_i915(dev);
  14252. struct drm_crtc *c;
  14253. struct drm_i915_gem_object *obj;
  14254. intel_init_gt_powersave(dev_priv);
  14255. intel_modeset_init_hw(dev);
  14256. intel_setup_overlay(dev_priv);
  14257. /*
  14258. * Make sure any fbs we allocated at startup are properly
  14259. * pinned & fenced. When we do the allocation it's too early
  14260. * for this.
  14261. */
  14262. for_each_crtc(dev, c) {
  14263. struct i915_vma *vma;
  14264. obj = intel_fb_obj(c->primary->fb);
  14265. if (obj == NULL)
  14266. continue;
  14267. mutex_lock(&dev->struct_mutex);
  14268. vma = intel_pin_and_fence_fb_obj(c->primary->fb,
  14269. c->primary->state->rotation);
  14270. mutex_unlock(&dev->struct_mutex);
  14271. if (IS_ERR(vma)) {
  14272. DRM_ERROR("failed to pin boot fb on pipe %d\n",
  14273. to_intel_crtc(c)->pipe);
  14274. drm_framebuffer_unreference(c->primary->fb);
  14275. c->primary->fb = NULL;
  14276. c->primary->crtc = c->primary->state->crtc = NULL;
  14277. update_state_fb(c->primary);
  14278. c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
  14279. }
  14280. }
  14281. }
  14282. int intel_connector_register(struct drm_connector *connector)
  14283. {
  14284. struct intel_connector *intel_connector = to_intel_connector(connector);
  14285. int ret;
  14286. ret = intel_backlight_device_register(intel_connector);
  14287. if (ret)
  14288. goto err;
  14289. return 0;
  14290. err:
  14291. return ret;
  14292. }
  14293. void intel_connector_unregister(struct drm_connector *connector)
  14294. {
  14295. struct intel_connector *intel_connector = to_intel_connector(connector);
  14296. intel_backlight_device_unregister(intel_connector);
  14297. intel_panel_destroy_backlight(connector);
  14298. }
  14299. void intel_modeset_cleanup(struct drm_device *dev)
  14300. {
  14301. struct drm_i915_private *dev_priv = to_i915(dev);
  14302. intel_disable_gt_powersave(dev_priv);
  14303. /*
  14304. * Interrupts and polling as the first thing to avoid creating havoc.
  14305. * Too much stuff here (turning of connectors, ...) would
  14306. * experience fancy races otherwise.
  14307. */
  14308. intel_irq_uninstall(dev_priv);
  14309. /*
  14310. * Due to the hpd irq storm handling the hotplug work can re-arm the
  14311. * poll handlers. Hence disable polling after hpd handling is shut down.
  14312. */
  14313. drm_kms_helper_poll_fini(dev);
  14314. intel_unregister_dsm_handler();
  14315. intel_fbc_global_disable(dev_priv);
  14316. /* flush any delayed tasks or pending work */
  14317. flush_scheduled_work();
  14318. drm_mode_config_cleanup(dev);
  14319. intel_cleanup_overlay(dev_priv);
  14320. intel_cleanup_gt_powersave(dev_priv);
  14321. intel_teardown_gmbus(dev);
  14322. }
  14323. void intel_connector_attach_encoder(struct intel_connector *connector,
  14324. struct intel_encoder *encoder)
  14325. {
  14326. connector->encoder = encoder;
  14327. drm_mode_connector_attach_encoder(&connector->base,
  14328. &encoder->base);
  14329. }
  14330. /*
  14331. * set vga decode state - true == enable VGA decode
  14332. */
  14333. int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
  14334. {
  14335. unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
  14336. u16 gmch_ctrl;
  14337. if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
  14338. DRM_ERROR("failed to read control word\n");
  14339. return -EIO;
  14340. }
  14341. if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
  14342. return 0;
  14343. if (state)
  14344. gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
  14345. else
  14346. gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
  14347. if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
  14348. DRM_ERROR("failed to write control word\n");
  14349. return -EIO;
  14350. }
  14351. return 0;
  14352. }
  14353. #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
  14354. struct intel_display_error_state {
  14355. u32 power_well_driver;
  14356. int num_transcoders;
  14357. struct intel_cursor_error_state {
  14358. u32 control;
  14359. u32 position;
  14360. u32 base;
  14361. u32 size;
  14362. } cursor[I915_MAX_PIPES];
  14363. struct intel_pipe_error_state {
  14364. bool power_domain_on;
  14365. u32 source;
  14366. u32 stat;
  14367. } pipe[I915_MAX_PIPES];
  14368. struct intel_plane_error_state {
  14369. u32 control;
  14370. u32 stride;
  14371. u32 size;
  14372. u32 pos;
  14373. u32 addr;
  14374. u32 surface;
  14375. u32 tile_offset;
  14376. } plane[I915_MAX_PIPES];
  14377. struct intel_transcoder_error_state {
  14378. bool power_domain_on;
  14379. enum transcoder cpu_transcoder;
  14380. u32 conf;
  14381. u32 htotal;
  14382. u32 hblank;
  14383. u32 hsync;
  14384. u32 vtotal;
  14385. u32 vblank;
  14386. u32 vsync;
  14387. } transcoder[4];
  14388. };
  14389. struct intel_display_error_state *
  14390. intel_display_capture_error_state(struct drm_i915_private *dev_priv)
  14391. {
  14392. struct intel_display_error_state *error;
  14393. int transcoders[] = {
  14394. TRANSCODER_A,
  14395. TRANSCODER_B,
  14396. TRANSCODER_C,
  14397. TRANSCODER_EDP,
  14398. };
  14399. int i;
  14400. if (INTEL_INFO(dev_priv)->num_pipes == 0)
  14401. return NULL;
  14402. error = kzalloc(sizeof(*error), GFP_ATOMIC);
  14403. if (error == NULL)
  14404. return NULL;
  14405. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  14406. error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
  14407. for_each_pipe(dev_priv, i) {
  14408. error->pipe[i].power_domain_on =
  14409. __intel_display_power_is_enabled(dev_priv,
  14410. POWER_DOMAIN_PIPE(i));
  14411. if (!error->pipe[i].power_domain_on)
  14412. continue;
  14413. error->cursor[i].control = I915_READ(CURCNTR(i));
  14414. error->cursor[i].position = I915_READ(CURPOS(i));
  14415. error->cursor[i].base = I915_READ(CURBASE(i));
  14416. error->plane[i].control = I915_READ(DSPCNTR(i));
  14417. error->plane[i].stride = I915_READ(DSPSTRIDE(i));
  14418. if (INTEL_GEN(dev_priv) <= 3) {
  14419. error->plane[i].size = I915_READ(DSPSIZE(i));
  14420. error->plane[i].pos = I915_READ(DSPPOS(i));
  14421. }
  14422. if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
  14423. error->plane[i].addr = I915_READ(DSPADDR(i));
  14424. if (INTEL_GEN(dev_priv) >= 4) {
  14425. error->plane[i].surface = I915_READ(DSPSURF(i));
  14426. error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
  14427. }
  14428. error->pipe[i].source = I915_READ(PIPESRC(i));
  14429. if (HAS_GMCH_DISPLAY(dev_priv))
  14430. error->pipe[i].stat = I915_READ(PIPESTAT(i));
  14431. }
  14432. /* Note: this does not include DSI transcoders. */
  14433. error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
  14434. if (HAS_DDI(dev_priv))
  14435. error->num_transcoders++; /* Account for eDP. */
  14436. for (i = 0; i < error->num_transcoders; i++) {
  14437. enum transcoder cpu_transcoder = transcoders[i];
  14438. error->transcoder[i].power_domain_on =
  14439. __intel_display_power_is_enabled(dev_priv,
  14440. POWER_DOMAIN_TRANSCODER(cpu_transcoder));
  14441. if (!error->transcoder[i].power_domain_on)
  14442. continue;
  14443. error->transcoder[i].cpu_transcoder = cpu_transcoder;
  14444. error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
  14445. error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
  14446. error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
  14447. error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
  14448. error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
  14449. error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
  14450. error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
  14451. }
  14452. return error;
  14453. }
  14454. #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
  14455. void
  14456. intel_display_print_error_state(struct drm_i915_error_state_buf *m,
  14457. struct drm_i915_private *dev_priv,
  14458. struct intel_display_error_state *error)
  14459. {
  14460. int i;
  14461. if (!error)
  14462. return;
  14463. err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
  14464. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  14465. err_printf(m, "PWR_WELL_CTL2: %08x\n",
  14466. error->power_well_driver);
  14467. for_each_pipe(dev_priv, i) {
  14468. err_printf(m, "Pipe [%d]:\n", i);
  14469. err_printf(m, " Power: %s\n",
  14470. onoff(error->pipe[i].power_domain_on));
  14471. err_printf(m, " SRC: %08x\n", error->pipe[i].source);
  14472. err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
  14473. err_printf(m, "Plane [%d]:\n", i);
  14474. err_printf(m, " CNTR: %08x\n", error->plane[i].control);
  14475. err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
  14476. if (INTEL_GEN(dev_priv) <= 3) {
  14477. err_printf(m, " SIZE: %08x\n", error->plane[i].size);
  14478. err_printf(m, " POS: %08x\n", error->plane[i].pos);
  14479. }
  14480. if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
  14481. err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
  14482. if (INTEL_GEN(dev_priv) >= 4) {
  14483. err_printf(m, " SURF: %08x\n", error->plane[i].surface);
  14484. err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
  14485. }
  14486. err_printf(m, "Cursor [%d]:\n", i);
  14487. err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
  14488. err_printf(m, " POS: %08x\n", error->cursor[i].position);
  14489. err_printf(m, " BASE: %08x\n", error->cursor[i].base);
  14490. }
  14491. for (i = 0; i < error->num_transcoders; i++) {
  14492. err_printf(m, "CPU transcoder: %s\n",
  14493. transcoder_name(error->transcoder[i].cpu_transcoder));
  14494. err_printf(m, " Power: %s\n",
  14495. onoff(error->transcoder[i].power_domain_on));
  14496. err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
  14497. err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
  14498. err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
  14499. err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
  14500. err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
  14501. err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
  14502. err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
  14503. }
  14504. }
  14505. #endif