intel_display.c 457 KB

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
5731157321573315734157351573615737157381573915740157411574215743157441574515746157471574815749157501575115752157531575415755157561575715758157591576015761157621576315764157651576615767157681576915770157711577215773157741577515776157771577815779157801578115782157831578415785157861578715788157891579015791157921579315794157951579615797157981579915800158011580215803158041580515806158071580815809158101581115812158131581415815158161581715818158191582015821158221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211622216223162241622516226162271622816229162301623116232162331623416235162361623716238162391624016241162421624316244162451624616247162481624916250162511625216253162541625516256162571625816259162601626116262162631626416265162661626716268162691627016271162721627316274162751627616277162781627916280162811628216283162841628516286162871628816289162901629116292162931629416295162961629716298162991630016301163021630316304163051630616307163081630916310163111631216313163141631516316163171631816319163201632116322163231632416325163261632716328163291633016331163321633316334163351633616337163381633916340163411634216343163441634516346163471634816349163501635116352163531635416355163561635716358163591636016361163621636316364163651636616367163681636916370163711637216373163741637516376163771637816379163801638116382
  1. /*
  2. * Copyright © 2006-2007 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21. * DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. */
  26. #include <linux/dmi.h>
  27. #include <linux/module.h>
  28. #include <linux/input.h>
  29. #include <linux/i2c.h>
  30. #include <linux/kernel.h>
  31. #include <linux/slab.h>
  32. #include <linux/vgaarb.h>
  33. #include <drm/drm_edid.h>
  34. #include <drm/drmP.h>
  35. #include "intel_drv.h"
  36. #include <drm/i915_drm.h>
  37. #include "i915_drv.h"
  38. #include "intel_dsi.h"
  39. #include "i915_trace.h"
  40. #include <drm/drm_atomic.h>
  41. #include <drm/drm_atomic_helper.h>
  42. #include <drm/drm_dp_helper.h>
  43. #include <drm/drm_crtc_helper.h>
  44. #include <drm/drm_plane_helper.h>
  45. #include <drm/drm_rect.h>
  46. #include <linux/dma_remapping.h>
  47. #include <linux/reservation.h>
  48. #include <linux/dma-buf.h>
  49. /* Primary plane formats for gen <= 3 */
  50. static const uint32_t i8xx_primary_formats[] = {
  51. DRM_FORMAT_C8,
  52. DRM_FORMAT_RGB565,
  53. DRM_FORMAT_XRGB1555,
  54. DRM_FORMAT_XRGB8888,
  55. };
  56. /* Primary plane formats for gen >= 4 */
  57. static const uint32_t i965_primary_formats[] = {
  58. DRM_FORMAT_C8,
  59. DRM_FORMAT_RGB565,
  60. DRM_FORMAT_XRGB8888,
  61. DRM_FORMAT_XBGR8888,
  62. DRM_FORMAT_XRGB2101010,
  63. DRM_FORMAT_XBGR2101010,
  64. };
  65. static const uint32_t skl_primary_formats[] = {
  66. DRM_FORMAT_C8,
  67. DRM_FORMAT_RGB565,
  68. DRM_FORMAT_XRGB8888,
  69. DRM_FORMAT_XBGR8888,
  70. DRM_FORMAT_ARGB8888,
  71. DRM_FORMAT_ABGR8888,
  72. DRM_FORMAT_XRGB2101010,
  73. DRM_FORMAT_XBGR2101010,
  74. DRM_FORMAT_YUYV,
  75. DRM_FORMAT_YVYU,
  76. DRM_FORMAT_UYVY,
  77. DRM_FORMAT_VYUY,
  78. };
  79. /* Cursor formats */
  80. static const uint32_t intel_cursor_formats[] = {
  81. DRM_FORMAT_ARGB8888,
  82. };
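/*
 * These format tables are what the driver advertises to the DRM core when
 * it registers each plane; userspace can then only create framebuffers
 * using one of the listed fourcc codes.  A minimal sketch of how such a
 * table is typically handed over (plane and funcs names here are
 * hypothetical and the exact drm_universal_plane_init() argument list
 * depends on the DRM core version, so remaining arguments are elided):
 *
 *	drm_universal_plane_init(dev, &cursor->base, possible_crtcs,
 *				 &cursor_plane_funcs,
 *				 intel_cursor_formats,
 *				 ARRAY_SIZE(intel_cursor_formats),
 *				 DRM_PLANE_TYPE_CURSOR, ...);
 */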
  83. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  84. struct intel_crtc_state *pipe_config);
  85. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  86. struct intel_crtc_state *pipe_config);
  87. static int intel_framebuffer_init(struct drm_device *dev,
  88. struct intel_framebuffer *ifb,
  89. struct drm_mode_fb_cmd2 *mode_cmd,
  90. struct drm_i915_gem_object *obj);
  91. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
  92. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
  93. static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
  94. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  95. struct intel_link_m_n *m_n,
  96. struct intel_link_m_n *m2_n2);
  97. static void ironlake_set_pipeconf(struct drm_crtc *crtc);
  98. static void haswell_set_pipeconf(struct drm_crtc *crtc);
  99. static void haswell_set_pipemisc(struct drm_crtc *crtc);
  100. static void vlv_prepare_pll(struct intel_crtc *crtc,
  101. const struct intel_crtc_state *pipe_config);
  102. static void chv_prepare_pll(struct intel_crtc *crtc,
  103. const struct intel_crtc_state *pipe_config);
  104. static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  105. static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  106. static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
  107. struct intel_crtc_state *crtc_state);
  108. static void skylake_pfit_enable(struct intel_crtc *crtc);
  109. static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
  110. static void ironlake_pfit_enable(struct intel_crtc *crtc);
  111. static void intel_modeset_setup_hw_state(struct drm_device *dev);
  112. static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
  113. typedef struct {
  114. int min, max;
  115. } intel_range_t;
  116. typedef struct {
  117. int dot_limit;
  118. int p2_slow, p2_fast;
  119. } intel_p2_t;
  120. typedef struct intel_limit intel_limit_t;
  121. struct intel_limit {
  122. intel_range_t dot, vco, n, m, m1, m2, p, p1;
  123. intel_p2_t p2;
  124. };
  125. /* returns HPLL frequency in kHz */
  126. static int valleyview_get_vco(struct drm_i915_private *dev_priv)
  127. {
  128. int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
  129. /* Obtain SKU information */
  130. mutex_lock(&dev_priv->sb_lock);
  131. hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
  132. CCK_FUSE_HPLL_FREQ_MASK;
  133. mutex_unlock(&dev_priv->sb_lock);
  134. return vco_freq[hpll_freq] * 1000;
  135. }
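/*
 * Worked example (hypothetical fuse value): if the CCK fuse reports
 * hpll_freq == 2, the table above selects vco_freq[2] == 2000 and the
 * function returns 2000 * 1000 == 2000000, i.e. a 2 GHz HPLL expressed
 * in kHz, matching the "returns HPLL frequency in kHz" comment.
 */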
  136. int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
  137. const char *name, u32 reg, int ref_freq)
  138. {
  139. u32 val;
  140. int divider;
  141. mutex_lock(&dev_priv->sb_lock);
  142. val = vlv_cck_read(dev_priv, reg);
  143. mutex_unlock(&dev_priv->sb_lock);
  144. divider = val & CCK_FREQUENCY_VALUES;
  145. WARN((val & CCK_FREQUENCY_STATUS) !=
  146. (divider << CCK_FREQUENCY_STATUS_SHIFT),
  147. "%s change in progress\n", name);
  148. return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
  149. }
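/*
 * Worked example (hypothetical register value): with ref_freq == 2000000
 * (a 2 GHz HPLL in kHz) and a divider field of 9 read back from the CCK
 * register, the result is DIV_ROUND_CLOSEST(2000000 << 1, 9 + 1) ==
 * 400000 kHz.  The WARN above only fires if the status bits indicate a
 * divider change is still in progress.
 */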
  150. static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
  151. const char *name, u32 reg)
  152. {
  153. if (dev_priv->hpll_freq == 0)
  154. dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
  155. return vlv_get_cck_clock(dev_priv, name, reg,
  156. dev_priv->hpll_freq);
  157. }
  158. static int
  159. intel_pch_rawclk(struct drm_i915_private *dev_priv)
  160. {
  161. return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
  162. }
  163. static int
  164. intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
  165. {
  166. return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
  167. CCK_DISPLAY_REF_CLOCK_CONTROL);
  168. }
  169. static int
  170. intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
  171. {
  172. uint32_t clkcfg;
  173. /* hrawclock is 1/4 the FSB frequency */
  174. clkcfg = I915_READ(CLKCFG);
  175. switch (clkcfg & CLKCFG_FSB_MASK) {
  176. case CLKCFG_FSB_400:
  177. return 100000;
  178. case CLKCFG_FSB_533:
  179. return 133333;
  180. case CLKCFG_FSB_667:
  181. return 166667;
  182. case CLKCFG_FSB_800:
  183. return 200000;
  184. case CLKCFG_FSB_1067:
  185. return 266667;
  186. case CLKCFG_FSB_1333:
  187. return 333333;
  188. /* these two are just a guess; one of them might be right */
  189. case CLKCFG_FSB_1600:
  190. case CLKCFG_FSB_1600_ALT:
  191. return 400000;
  192. default:
  193. return 133333;
  194. }
  195. }
  196. static void intel_update_rawclk(struct drm_i915_private *dev_priv)
  197. {
  198. if (HAS_PCH_SPLIT(dev_priv))
  199. dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
  200. else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  201. dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
  202. else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
  203. dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
  204. else
  205. return; /* no rawclk on other platforms, or no need to know it */
  206. DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
  207. }
  208. static void intel_update_czclk(struct drm_i915_private *dev_priv)
  209. {
  210. if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
  211. return;
  212. dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
  213. CCK_CZ_CLOCK_CONTROL);
  214. DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
  215. }
216. static inline u32 /* units of kHz */
  217. intel_fdi_link_freq(struct drm_i915_private *dev_priv,
  218. const struct intel_crtc_state *pipe_config)
  219. {
  220. if (HAS_DDI(dev_priv))
  221. return pipe_config->port_clock; /* SPLL */
  222. else if (IS_GEN5(dev_priv))
  223. return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
  224. else
  225. return 270000;
  226. }
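/*
 * Worked example for the IS_GEN5() branch (hypothetical register value):
 * if FDI_PLL_BIOS_0 reports a feedback divider field of 25, the link
 * frequency is (25 + 2) * 10000 == 270000 kHz, i.e. the same 270 MHz FDI
 * link clock returned as the fixed default for other pre-DDI platforms.
 */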
  227. static const intel_limit_t intel_limits_i8xx_dac = {
  228. .dot = { .min = 25000, .max = 350000 },
  229. .vco = { .min = 908000, .max = 1512000 },
  230. .n = { .min = 2, .max = 16 },
  231. .m = { .min = 96, .max = 140 },
  232. .m1 = { .min = 18, .max = 26 },
  233. .m2 = { .min = 6, .max = 16 },
  234. .p = { .min = 4, .max = 128 },
  235. .p1 = { .min = 2, .max = 33 },
  236. .p2 = { .dot_limit = 165000,
  237. .p2_slow = 4, .p2_fast = 2 },
  238. };
  239. static const intel_limit_t intel_limits_i8xx_dvo = {
  240. .dot = { .min = 25000, .max = 350000 },
  241. .vco = { .min = 908000, .max = 1512000 },
  242. .n = { .min = 2, .max = 16 },
  243. .m = { .min = 96, .max = 140 },
  244. .m1 = { .min = 18, .max = 26 },
  245. .m2 = { .min = 6, .max = 16 },
  246. .p = { .min = 4, .max = 128 },
  247. .p1 = { .min = 2, .max = 33 },
  248. .p2 = { .dot_limit = 165000,
  249. .p2_slow = 4, .p2_fast = 4 },
  250. };
  251. static const intel_limit_t intel_limits_i8xx_lvds = {
  252. .dot = { .min = 25000, .max = 350000 },
  253. .vco = { .min = 908000, .max = 1512000 },
  254. .n = { .min = 2, .max = 16 },
  255. .m = { .min = 96, .max = 140 },
  256. .m1 = { .min = 18, .max = 26 },
  257. .m2 = { .min = 6, .max = 16 },
  258. .p = { .min = 4, .max = 128 },
  259. .p1 = { .min = 1, .max = 6 },
  260. .p2 = { .dot_limit = 165000,
  261. .p2_slow = 14, .p2_fast = 7 },
  262. };
  263. static const intel_limit_t intel_limits_i9xx_sdvo = {
  264. .dot = { .min = 20000, .max = 400000 },
  265. .vco = { .min = 1400000, .max = 2800000 },
  266. .n = { .min = 1, .max = 6 },
  267. .m = { .min = 70, .max = 120 },
  268. .m1 = { .min = 8, .max = 18 },
  269. .m2 = { .min = 3, .max = 7 },
  270. .p = { .min = 5, .max = 80 },
  271. .p1 = { .min = 1, .max = 8 },
  272. .p2 = { .dot_limit = 200000,
  273. .p2_slow = 10, .p2_fast = 5 },
  274. };
  275. static const intel_limit_t intel_limits_i9xx_lvds = {
  276. .dot = { .min = 20000, .max = 400000 },
  277. .vco = { .min = 1400000, .max = 2800000 },
  278. .n = { .min = 1, .max = 6 },
  279. .m = { .min = 70, .max = 120 },
  280. .m1 = { .min = 8, .max = 18 },
  281. .m2 = { .min = 3, .max = 7 },
  282. .p = { .min = 7, .max = 98 },
  283. .p1 = { .min = 1, .max = 8 },
  284. .p2 = { .dot_limit = 112000,
  285. .p2_slow = 14, .p2_fast = 7 },
  286. };
  287. static const intel_limit_t intel_limits_g4x_sdvo = {
  288. .dot = { .min = 25000, .max = 270000 },
  289. .vco = { .min = 1750000, .max = 3500000},
  290. .n = { .min = 1, .max = 4 },
  291. .m = { .min = 104, .max = 138 },
  292. .m1 = { .min = 17, .max = 23 },
  293. .m2 = { .min = 5, .max = 11 },
  294. .p = { .min = 10, .max = 30 },
  295. .p1 = { .min = 1, .max = 3},
  296. .p2 = { .dot_limit = 270000,
  297. .p2_slow = 10,
  298. .p2_fast = 10
  299. },
  300. };
  301. static const intel_limit_t intel_limits_g4x_hdmi = {
  302. .dot = { .min = 22000, .max = 400000 },
  303. .vco = { .min = 1750000, .max = 3500000},
  304. .n = { .min = 1, .max = 4 },
  305. .m = { .min = 104, .max = 138 },
  306. .m1 = { .min = 16, .max = 23 },
  307. .m2 = { .min = 5, .max = 11 },
  308. .p = { .min = 5, .max = 80 },
  309. .p1 = { .min = 1, .max = 8},
  310. .p2 = { .dot_limit = 165000,
  311. .p2_slow = 10, .p2_fast = 5 },
  312. };
  313. static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
  314. .dot = { .min = 20000, .max = 115000 },
  315. .vco = { .min = 1750000, .max = 3500000 },
  316. .n = { .min = 1, .max = 3 },
  317. .m = { .min = 104, .max = 138 },
  318. .m1 = { .min = 17, .max = 23 },
  319. .m2 = { .min = 5, .max = 11 },
  320. .p = { .min = 28, .max = 112 },
  321. .p1 = { .min = 2, .max = 8 },
  322. .p2 = { .dot_limit = 0,
  323. .p2_slow = 14, .p2_fast = 14
  324. },
  325. };
  326. static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
  327. .dot = { .min = 80000, .max = 224000 },
  328. .vco = { .min = 1750000, .max = 3500000 },
  329. .n = { .min = 1, .max = 3 },
  330. .m = { .min = 104, .max = 138 },
  331. .m1 = { .min = 17, .max = 23 },
  332. .m2 = { .min = 5, .max = 11 },
  333. .p = { .min = 14, .max = 42 },
  334. .p1 = { .min = 2, .max = 6 },
  335. .p2 = { .dot_limit = 0,
  336. .p2_slow = 7, .p2_fast = 7
  337. },
  338. };
  339. static const intel_limit_t intel_limits_pineview_sdvo = {
  340. .dot = { .min = 20000, .max = 400000},
  341. .vco = { .min = 1700000, .max = 3500000 },
  342. /* Pineview's Ncounter is a ring counter */
  343. .n = { .min = 3, .max = 6 },
  344. .m = { .min = 2, .max = 256 },
  345. /* Pineview only has one combined m divider, which we treat as m2. */
  346. .m1 = { .min = 0, .max = 0 },
  347. .m2 = { .min = 0, .max = 254 },
  348. .p = { .min = 5, .max = 80 },
  349. .p1 = { .min = 1, .max = 8 },
  350. .p2 = { .dot_limit = 200000,
  351. .p2_slow = 10, .p2_fast = 5 },
  352. };
  353. static const intel_limit_t intel_limits_pineview_lvds = {
  354. .dot = { .min = 20000, .max = 400000 },
  355. .vco = { .min = 1700000, .max = 3500000 },
  356. .n = { .min = 3, .max = 6 },
  357. .m = { .min = 2, .max = 256 },
  358. .m1 = { .min = 0, .max = 0 },
  359. .m2 = { .min = 0, .max = 254 },
  360. .p = { .min = 7, .max = 112 },
  361. .p1 = { .min = 1, .max = 8 },
  362. .p2 = { .dot_limit = 112000,
  363. .p2_slow = 14, .p2_fast = 14 },
  364. };
  365. /* Ironlake / Sandybridge
  366. *
  367. * We calculate clock using (register_value + 2) for N/M1/M2, so here
  368. * the range value for them is (actual_value - 2).
  369. */
  370. static const intel_limit_t intel_limits_ironlake_dac = {
  371. .dot = { .min = 25000, .max = 350000 },
  372. .vco = { .min = 1760000, .max = 3510000 },
  373. .n = { .min = 1, .max = 5 },
  374. .m = { .min = 79, .max = 127 },
  375. .m1 = { .min = 12, .max = 22 },
  376. .m2 = { .min = 5, .max = 9 },
  377. .p = { .min = 5, .max = 80 },
  378. .p1 = { .min = 1, .max = 8 },
  379. .p2 = { .dot_limit = 225000,
  380. .p2_slow = 10, .p2_fast = 5 },
  381. };
  382. static const intel_limit_t intel_limits_ironlake_single_lvds = {
  383. .dot = { .min = 25000, .max = 350000 },
  384. .vco = { .min = 1760000, .max = 3510000 },
  385. .n = { .min = 1, .max = 3 },
  386. .m = { .min = 79, .max = 118 },
  387. .m1 = { .min = 12, .max = 22 },
  388. .m2 = { .min = 5, .max = 9 },
  389. .p = { .min = 28, .max = 112 },
  390. .p1 = { .min = 2, .max = 8 },
  391. .p2 = { .dot_limit = 225000,
  392. .p2_slow = 14, .p2_fast = 14 },
  393. };
  394. static const intel_limit_t intel_limits_ironlake_dual_lvds = {
  395. .dot = { .min = 25000, .max = 350000 },
  396. .vco = { .min = 1760000, .max = 3510000 },
  397. .n = { .min = 1, .max = 3 },
  398. .m = { .min = 79, .max = 127 },
  399. .m1 = { .min = 12, .max = 22 },
  400. .m2 = { .min = 5, .max = 9 },
  401. .p = { .min = 14, .max = 56 },
  402. .p1 = { .min = 2, .max = 8 },
  403. .p2 = { .dot_limit = 225000,
  404. .p2_slow = 7, .p2_fast = 7 },
  405. };
406. /* LVDS 100MHz refclk limits. */
  407. static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
  408. .dot = { .min = 25000, .max = 350000 },
  409. .vco = { .min = 1760000, .max = 3510000 },
  410. .n = { .min = 1, .max = 2 },
  411. .m = { .min = 79, .max = 126 },
  412. .m1 = { .min = 12, .max = 22 },
  413. .m2 = { .min = 5, .max = 9 },
  414. .p = { .min = 28, .max = 112 },
  415. .p1 = { .min = 2, .max = 8 },
  416. .p2 = { .dot_limit = 225000,
  417. .p2_slow = 14, .p2_fast = 14 },
  418. };
  419. static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
  420. .dot = { .min = 25000, .max = 350000 },
  421. .vco = { .min = 1760000, .max = 3510000 },
  422. .n = { .min = 1, .max = 3 },
  423. .m = { .min = 79, .max = 126 },
  424. .m1 = { .min = 12, .max = 22 },
  425. .m2 = { .min = 5, .max = 9 },
  426. .p = { .min = 14, .max = 42 },
  427. .p1 = { .min = 2, .max = 6 },
  428. .p2 = { .dot_limit = 225000,
  429. .p2_slow = 7, .p2_fast = 7 },
  430. };
  431. static const intel_limit_t intel_limits_vlv = {
  432. /*
  433. * These are the data rate limits (measured in fast clocks)
  434. * since those are the strictest limits we have. The fast
  435. * clock and actual rate limits are more relaxed, so checking
  436. * them would make no difference.
  437. */
  438. .dot = { .min = 25000 * 5, .max = 270000 * 5 },
  439. .vco = { .min = 4000000, .max = 6000000 },
  440. .n = { .min = 1, .max = 7 },
  441. .m1 = { .min = 2, .max = 3 },
  442. .m2 = { .min = 11, .max = 156 },
  443. .p1 = { .min = 2, .max = 3 },
  444. .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
  445. };
  446. static const intel_limit_t intel_limits_chv = {
  447. /*
  448. * These are the data rate limits (measured in fast clocks)
  449. * since those are the strictest limits we have. The fast
  450. * clock and actual rate limits are more relaxed, so checking
  451. * them would make no difference.
  452. */
  453. .dot = { .min = 25000 * 5, .max = 540000 * 5},
  454. .vco = { .min = 4800000, .max = 6480000 },
  455. .n = { .min = 1, .max = 1 },
  456. .m1 = { .min = 2, .max = 2 },
  457. .m2 = { .min = 24 << 22, .max = 175 << 22 },
  458. .p1 = { .min = 2, .max = 4 },
  459. .p2 = { .p2_slow = 1, .p2_fast = 14 },
  460. };
  461. static const intel_limit_t intel_limits_bxt = {
  462. /* FIXME: find real dot limits */
  463. .dot = { .min = 0, .max = INT_MAX },
  464. .vco = { .min = 4800000, .max = 6700000 },
  465. .n = { .min = 1, .max = 1 },
  466. .m1 = { .min = 2, .max = 2 },
  467. /* FIXME: find real m2 limits */
  468. .m2 = { .min = 2 << 22, .max = 255 << 22 },
  469. .p1 = { .min = 2, .max = 4 },
  470. .p2 = { .p2_slow = 1, .p2_fast = 20 },
  471. };
  472. static bool
  473. needs_modeset(struct drm_crtc_state *state)
  474. {
  475. return drm_atomic_crtc_needs_modeset(state);
  476. }
  477. /**
  478. * Returns whether any output on the specified pipe is of the specified type
  479. */
  480. bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
  481. {
  482. struct drm_device *dev = crtc->base.dev;
  483. struct intel_encoder *encoder;
  484. for_each_encoder_on_crtc(dev, &crtc->base, encoder)
  485. if (encoder->type == type)
  486. return true;
  487. return false;
  488. }
  489. /**
  490. * Returns whether any output on the specified pipe will have the specified
  491. * type after a staged modeset is complete, i.e., the same as
  492. * intel_pipe_has_type() but looking at encoder->new_crtc instead of
  493. * encoder->crtc.
  494. */
  495. static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
  496. int type)
  497. {
  498. struct drm_atomic_state *state = crtc_state->base.state;
  499. struct drm_connector *connector;
  500. struct drm_connector_state *connector_state;
  501. struct intel_encoder *encoder;
  502. int i, num_connectors = 0;
  503. for_each_connector_in_state(state, connector, connector_state, i) {
  504. if (connector_state->crtc != crtc_state->base.crtc)
  505. continue;
  506. num_connectors++;
  507. encoder = to_intel_encoder(connector_state->best_encoder);
  508. if (encoder->type == type)
  509. return true;
  510. }
  511. WARN_ON(num_connectors == 0);
  512. return false;
  513. }
  514. /*
  515. * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  516. * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
  517. * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
  518. * The helpers' return value is the rate of the clock that is fed to the
  519. * display engine's pipe which can be the above fast dot clock rate or a
  520. * divided-down version of it.
  521. */
  522. /* m1 is reserved as 0 in Pineview, n is a ring counter */
  523. static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
  524. {
  525. clock->m = clock->m2 + 2;
  526. clock->p = clock->p1 * clock->p2;
  527. if (WARN_ON(clock->n == 0 || clock->p == 0))
  528. return 0;
  529. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  530. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  531. return clock->dot;
  532. }
  533. static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
  534. {
  535. return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
  536. }
  537. static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
  538. {
  539. clock->m = i9xx_dpll_compute_m(clock);
  540. clock->p = clock->p1 * clock->p2;
  541. if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
  542. return 0;
  543. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
  544. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  545. return clock->dot;
  546. }
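/*
 * Worked example (hypothetical divisors, 96 MHz refclk): m1 = 10, m2 = 5,
 * n = 2, p1 = 2, p2 = 5 gives
 *
 *	m   = 5 * (10 + 2) + (5 + 2)	= 67
 *	vco = 96000 * 67 / (2 + 2)	= 1608000 kHz
 *	p   = 2 * 5			= 10
 *	dot = 1608000 / 10		= 160800 kHz
 *
 * i.e. a ~160.8 MHz pixel clock; intel_PLL_is_valid() below then checks
 * each of these values against the per-platform limit tables.
 */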
  547. static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
  548. {
  549. clock->m = clock->m1 * clock->m2;
  550. clock->p = clock->p1 * clock->p2;
  551. if (WARN_ON(clock->n == 0 || clock->p == 0))
  552. return 0;
  553. clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
  554. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  555. return clock->dot / 5;
  556. }
  557. int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
  558. {
  559. clock->m = clock->m1 * clock->m2;
  560. clock->p = clock->p1 * clock->p2;
  561. if (WARN_ON(clock->n == 0 || clock->p == 0))
  562. return 0;
  563. clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
  564. clock->n << 22);
  565. clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
  566. return clock->dot / 5;
  567. }
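/*
 * On CHV m2 is a fixed point number with 22 fractional bits (see the
 * "m2 = ... << 22" limits above), which is why the VCO is divided by
 * (n << 22).  Worked example (hypothetical values, 19.2 MHz refclk):
 * n = 1, m1 = 2, m2 = 100 << 22 gives
 *
 *	m   = 2 * (100 << 22)
 *	vco = 19200 * m / (1 << 22) = 19200 * 200 = 3840000 kHz
 *
 * and the /5 at the end converts the fast (5x) port clock back to the
 * pixel clock fed to the pipe.
 */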
  568. #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
  569. /**
  570. * Returns whether the given set of divisors are valid for a given refclk with
  571. * the given connectors.
  572. */
  573. static bool intel_PLL_is_valid(struct drm_device *dev,
  574. const intel_limit_t *limit,
  575. const intel_clock_t *clock)
  576. {
  577. if (clock->n < limit->n.min || limit->n.max < clock->n)
  578. INTELPllInvalid("n out of range\n");
  579. if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
  580. INTELPllInvalid("p1 out of range\n");
  581. if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
  582. INTELPllInvalid("m2 out of range\n");
  583. if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
  584. INTELPllInvalid("m1 out of range\n");
  585. if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
  586. !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
  587. if (clock->m1 <= clock->m2)
  588. INTELPllInvalid("m1 <= m2\n");
  589. if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
  590. if (clock->p < limit->p.min || limit->p.max < clock->p)
  591. INTELPllInvalid("p out of range\n");
  592. if (clock->m < limit->m.min || limit->m.max < clock->m)
  593. INTELPllInvalid("m out of range\n");
  594. }
  595. if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
  596. INTELPllInvalid("vco out of range\n");
  597. /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
  598. * connector, etc., rather than just a single range.
  599. */
  600. if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
  601. INTELPllInvalid("dot out of range\n");
  602. return true;
  603. }
  604. static int
  605. i9xx_select_p2_div(const intel_limit_t *limit,
  606. const struct intel_crtc_state *crtc_state,
  607. int target)
  608. {
  609. struct drm_device *dev = crtc_state->base.crtc->dev;
  610. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  611. /*
  612. * For LVDS just rely on its current settings for dual-channel.
  613. * We haven't figured out how to reliably set up different
  614. * single/dual channel state, if we even can.
  615. */
  616. if (intel_is_dual_link_lvds(dev))
  617. return limit->p2.p2_fast;
  618. else
  619. return limit->p2.p2_slow;
  620. } else {
  621. if (target < limit->p2.dot_limit)
  622. return limit->p2.p2_slow;
  623. else
  624. return limit->p2.p2_fast;
  625. }
  626. }
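/*
 * Example of the p2 selection above (using the intel_limits_i9xx_sdvo
 * table defined earlier): for a non-LVDS output with target == 150000 kHz,
 * target < dot_limit (200000), so the slow divider p2 == 10 is used; a
 * 250000 kHz target would pick the fast divider p2 == 5 instead.  For
 * LVDS the choice depends only on whether the panel is wired dual-link.
 */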
  627. /*
  628. * Returns a set of divisors for the desired target clock with the given
  629. * refclk, or FALSE. The returned values represent the clock equation:
630. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  631. *
  632. * Target and reference clocks are specified in kHz.
  633. *
  634. * If match_clock is provided, then best_clock P divider must match the P
  635. * divider from @match_clock used for LVDS downclocking.
  636. */
  637. static bool
  638. i9xx_find_best_dpll(const intel_limit_t *limit,
  639. struct intel_crtc_state *crtc_state,
  640. int target, int refclk, intel_clock_t *match_clock,
  641. intel_clock_t *best_clock)
  642. {
  643. struct drm_device *dev = crtc_state->base.crtc->dev;
  644. intel_clock_t clock;
  645. int err = target;
  646. memset(best_clock, 0, sizeof(*best_clock));
  647. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  648. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  649. clock.m1++) {
  650. for (clock.m2 = limit->m2.min;
  651. clock.m2 <= limit->m2.max; clock.m2++) {
  652. if (clock.m2 >= clock.m1)
  653. break;
  654. for (clock.n = limit->n.min;
  655. clock.n <= limit->n.max; clock.n++) {
  656. for (clock.p1 = limit->p1.min;
  657. clock.p1 <= limit->p1.max; clock.p1++) {
  658. int this_err;
  659. i9xx_calc_dpll_params(refclk, &clock);
  660. if (!intel_PLL_is_valid(dev, limit,
  661. &clock))
  662. continue;
  663. if (match_clock &&
  664. clock.p != match_clock->p)
  665. continue;
  666. this_err = abs(clock.dot - target);
  667. if (this_err < err) {
  668. *best_clock = clock;
  669. err = this_err;
  670. }
  671. }
  672. }
  673. }
  674. }
  675. return (err != target);
  676. }
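/*
 * Typical use (a sketch with hypothetical values; the real callers are the
 * platform .crtc_compute_clock hooks later in this file):
 *
 *	intel_clock_t clock;
 *
 *	if (!i9xx_find_best_dpll(&intel_limits_i9xx_sdvo, crtc_state,
 *				 crtc_state->port_clock, 96000,
 *				 NULL, &clock))
 *		return -EINVAL;
 *
 * On success @clock holds the divisor set whose dot clock is closest to
 * the requested target.
 */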
  677. /*
  678. * Returns a set of divisors for the desired target clock with the given
  679. * refclk, or FALSE. The returned values represent the clock equation:
680. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  681. *
  682. * Target and reference clocks are specified in kHz.
  683. *
  684. * If match_clock is provided, then best_clock P divider must match the P
  685. * divider from @match_clock used for LVDS downclocking.
  686. */
  687. static bool
  688. pnv_find_best_dpll(const intel_limit_t *limit,
  689. struct intel_crtc_state *crtc_state,
  690. int target, int refclk, intel_clock_t *match_clock,
  691. intel_clock_t *best_clock)
  692. {
  693. struct drm_device *dev = crtc_state->base.crtc->dev;
  694. intel_clock_t clock;
  695. int err = target;
  696. memset(best_clock, 0, sizeof(*best_clock));
  697. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  698. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
  699. clock.m1++) {
  700. for (clock.m2 = limit->m2.min;
  701. clock.m2 <= limit->m2.max; clock.m2++) {
  702. for (clock.n = limit->n.min;
  703. clock.n <= limit->n.max; clock.n++) {
  704. for (clock.p1 = limit->p1.min;
  705. clock.p1 <= limit->p1.max; clock.p1++) {
  706. int this_err;
  707. pnv_calc_dpll_params(refclk, &clock);
  708. if (!intel_PLL_is_valid(dev, limit,
  709. &clock))
  710. continue;
  711. if (match_clock &&
  712. clock.p != match_clock->p)
  713. continue;
  714. this_err = abs(clock.dot - target);
  715. if (this_err < err) {
  716. *best_clock = clock;
  717. err = this_err;
  718. }
  719. }
  720. }
  721. }
  722. }
  723. return (err != target);
  724. }
  725. /*
  726. * Returns a set of divisors for the desired target clock with the given
  727. * refclk, or FALSE. The returned values represent the clock equation:
728. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  729. *
  730. * Target and reference clocks are specified in kHz.
  731. *
  732. * If match_clock is provided, then best_clock P divider must match the P
  733. * divider from @match_clock used for LVDS downclocking.
  734. */
  735. static bool
  736. g4x_find_best_dpll(const intel_limit_t *limit,
  737. struct intel_crtc_state *crtc_state,
  738. int target, int refclk, intel_clock_t *match_clock,
  739. intel_clock_t *best_clock)
  740. {
  741. struct drm_device *dev = crtc_state->base.crtc->dev;
  742. intel_clock_t clock;
  743. int max_n;
  744. bool found = false;
  745. /* approximately equals target * 0.00585 */
  746. int err_most = (target >> 8) + (target >> 9);
  747. memset(best_clock, 0, sizeof(*best_clock));
  748. clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
  749. max_n = limit->n.max;
750. /* based on hardware requirement, prefer smaller n for precision */
  751. for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
752. /* based on hardware requirement, prefer larger m1,m2 */
  753. for (clock.m1 = limit->m1.max;
  754. clock.m1 >= limit->m1.min; clock.m1--) {
  755. for (clock.m2 = limit->m2.max;
  756. clock.m2 >= limit->m2.min; clock.m2--) {
  757. for (clock.p1 = limit->p1.max;
  758. clock.p1 >= limit->p1.min; clock.p1--) {
  759. int this_err;
  760. i9xx_calc_dpll_params(refclk, &clock);
  761. if (!intel_PLL_is_valid(dev, limit,
  762. &clock))
  763. continue;
  764. this_err = abs(clock.dot - target);
  765. if (this_err < err_most) {
  766. *best_clock = clock;
  767. err_most = this_err;
  768. max_n = clock.n;
  769. found = true;
  770. }
  771. }
  772. }
  773. }
  774. }
  775. return found;
  776. }
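/*
 * The err_most bound above works out as target * (1/256 + 1/512), e.g. a
 * 270000 kHz target tolerates roughly (270000 >> 8) + (270000 >> 9) ==
 * 1054 + 527 == 1581 kHz of deviation; the loops then keep shrinking both
 * err_most and max_n as better candidates are found.
 */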
  777. /*
  778. * Check if the calculated PLL configuration is more optimal compared to the
  779. * best configuration and error found so far. Return the calculated error.
  780. */
  781. static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
  782. const intel_clock_t *calculated_clock,
  783. const intel_clock_t *best_clock,
  784. unsigned int best_error_ppm,
  785. unsigned int *error_ppm)
  786. {
  787. /*
  788. * For CHV ignore the error and consider only the P value.
  789. * Prefer a bigger P value based on HW requirements.
  790. */
  791. if (IS_CHERRYVIEW(dev)) {
  792. *error_ppm = 0;
  793. return calculated_clock->p > best_clock->p;
  794. }
  795. if (WARN_ON_ONCE(!target_freq))
  796. return false;
  797. *error_ppm = div_u64(1000000ULL *
  798. abs(target_freq - calculated_clock->dot),
  799. target_freq);
  800. /*
  801. * Prefer a better P value over a better (smaller) error if the error
  802. * is small. Ensure this preference for future configurations too by
  803. * setting the error to 0.
  804. */
  805. if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
  806. *error_ppm = 0;
  807. return true;
  808. }
  809. return *error_ppm + 10 < best_error_ppm;
  810. }
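/*
 * Worked example (hypothetical clocks): with target_freq == 1350000 (a
 * 270000 kHz pixel clock scaled by 5 for the fast clock) and a candidate
 * dot of 1349500, the error is 1000000 * 500 / 1350000 ~= 370 ppm.  That
 * is above the 100 ppm threshold, so the candidate only wins if it beats
 * the best error so far by more than 10 ppm; below 100 ppm a candidate
 * with a larger P divider would have been preferred outright.
 */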
  811. /*
  812. * Returns a set of divisors for the desired target clock with the given
  813. * refclk, or FALSE. The returned values represent the clock equation:
814. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  815. */
  816. static bool
  817. vlv_find_best_dpll(const intel_limit_t *limit,
  818. struct intel_crtc_state *crtc_state,
  819. int target, int refclk, intel_clock_t *match_clock,
  820. intel_clock_t *best_clock)
  821. {
  822. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  823. struct drm_device *dev = crtc->base.dev;
  824. intel_clock_t clock;
  825. unsigned int bestppm = 1000000;
  826. /* min update 19.2 MHz */
  827. int max_n = min(limit->n.max, refclk / 19200);
  828. bool found = false;
  829. target *= 5; /* fast clock */
  830. memset(best_clock, 0, sizeof(*best_clock));
831. /* based on hardware requirement, prefer smaller n for precision */
  832. for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
  833. for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  834. for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
  835. clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  836. clock.p = clock.p1 * clock.p2;
  837. /* based on hardware requirement, prefer bigger m1,m2 values */
  838. for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
  839. unsigned int ppm;
  840. clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
  841. refclk * clock.m1);
  842. vlv_calc_dpll_params(refclk, &clock);
  843. if (!intel_PLL_is_valid(dev, limit,
  844. &clock))
  845. continue;
  846. if (!vlv_PLL_is_optimal(dev, target,
  847. &clock,
  848. best_clock,
  849. bestppm, &ppm))
  850. continue;
  851. *best_clock = clock;
  852. bestppm = ppm;
  853. found = true;
  854. }
  855. }
  856. }
  857. }
  858. return found;
  859. }
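/*
 * The max_n clamp above enforces the minimum 19.2 MHz PLL update rate:
 * with a 100000 kHz refclk (the usual VLV value, used here only as an
 * illustration; the actual refclk comes from the caller), refclk / 19200
 * == 5, so only n = 1..5 is searched even though the limit table allows
 * up to 7.
 */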
  860. /*
  861. * Returns a set of divisors for the desired target clock with the given
  862. * refclk, or FALSE. The returned values represent the clock equation:
863. * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  864. */
  865. static bool
  866. chv_find_best_dpll(const intel_limit_t *limit,
  867. struct intel_crtc_state *crtc_state,
  868. int target, int refclk, intel_clock_t *match_clock,
  869. intel_clock_t *best_clock)
  870. {
  871. struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
  872. struct drm_device *dev = crtc->base.dev;
  873. unsigned int best_error_ppm;
  874. intel_clock_t clock;
  875. uint64_t m2;
876. bool found = false;
  877. memset(best_clock, 0, sizeof(*best_clock));
  878. best_error_ppm = 1000000;
879. /*
880. * Based on the hardware documentation, n is always set to 1 and m1 is
881. * always set to 2. If we ever need to support a 200 MHz refclk, this
882. * will need to be revisited because n may no longer be 1.
883. */
884. clock.n = 1; clock.m1 = 2;
  885. target *= 5; /* fast clock */
  886. for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
  887. for (clock.p2 = limit->p2.p2_fast;
  888. clock.p2 >= limit->p2.p2_slow;
  889. clock.p2 -= clock.p2 > 10 ? 2 : 1) {
  890. unsigned int error_ppm;
  891. clock.p = clock.p1 * clock.p2;
  892. m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
  893. clock.n) << 22, refclk * clock.m1);
  894. if (m2 > INT_MAX/clock.m1)
  895. continue;
  896. clock.m2 = m2;
  897. chv_calc_dpll_params(refclk, &clock);
  898. if (!intel_PLL_is_valid(dev, limit, &clock))
  899. continue;
  900. if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
  901. best_error_ppm, &error_ppm))
  902. continue;
  903. *best_clock = clock;
  904. best_error_ppm = error_ppm;
  905. found = true;
  906. }
  907. }
  908. return found;
  909. }
  910. bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
  911. intel_clock_t *best_clock)
  912. {
  913. int refclk = 100000;
  914. const intel_limit_t *limit = &intel_limits_bxt;
  915. return chv_find_best_dpll(limit, crtc_state,
  916. target_clock, refclk, NULL, best_clock);
  917. }
  918. bool intel_crtc_active(struct drm_crtc *crtc)
  919. {
  920. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  921. /* Be paranoid as we can arrive here with only partial
  922. * state retrieved from the hardware during setup.
  923. *
  924. * We can ditch the adjusted_mode.crtc_clock check as soon
  925. * as Haswell has gained clock readout/fastboot support.
  926. *
  927. * We can ditch the crtc->primary->fb check as soon as we can
  928. * properly reconstruct framebuffers.
  929. *
  930. * FIXME: The intel_crtc->active here should be switched to
  931. * crtc->state->active once we have proper CRTC states wired up
  932. * for atomic.
  933. */
  934. return intel_crtc->active && crtc->primary->state->fb &&
  935. intel_crtc->config->base.adjusted_mode.crtc_clock;
  936. }
  937. enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
  938. enum pipe pipe)
  939. {
  940. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  941. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  942. return intel_crtc->config->cpu_transcoder;
  943. }
  944. static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
  945. {
  946. struct drm_i915_private *dev_priv = dev->dev_private;
  947. i915_reg_t reg = PIPEDSL(pipe);
  948. u32 line1, line2;
  949. u32 line_mask;
  950. if (IS_GEN2(dev))
  951. line_mask = DSL_LINEMASK_GEN2;
  952. else
  953. line_mask = DSL_LINEMASK_GEN3;
  954. line1 = I915_READ(reg) & line_mask;
  955. msleep(5);
  956. line2 = I915_READ(reg) & line_mask;
  957. return line1 == line2;
  958. }
  959. /*
  960. * intel_wait_for_pipe_off - wait for pipe to turn off
  961. * @crtc: crtc whose pipe to wait for
  962. *
  963. * After disabling a pipe, we can't wait for vblank in the usual way,
  964. * spinning on the vblank interrupt status bit, since we won't actually
  965. * see an interrupt when the pipe is disabled.
  966. *
  967. * On Gen4 and above:
  968. * wait for the pipe register state bit to turn off
  969. *
  970. * Otherwise:
  971. * wait for the display line value to settle (it usually
  972. * ends up stopping at the start of the next frame).
  973. *
  974. */
  975. static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
  976. {
  977. struct drm_device *dev = crtc->base.dev;
  978. struct drm_i915_private *dev_priv = dev->dev_private;
  979. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  980. enum pipe pipe = crtc->pipe;
  981. if (INTEL_INFO(dev)->gen >= 4) {
  982. i915_reg_t reg = PIPECONF(cpu_transcoder);
  983. /* Wait for the Pipe State to go off */
  984. if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
  985. 100))
  986. WARN(1, "pipe_off wait timed out\n");
  987. } else {
  988. /* Wait for the display line to settle */
  989. if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
  990. WARN(1, "pipe_off wait timed out\n");
  991. }
  992. }
  993. /* Only for pre-ILK configs */
  994. void assert_pll(struct drm_i915_private *dev_priv,
  995. enum pipe pipe, bool state)
  996. {
  997. u32 val;
  998. bool cur_state;
  999. val = I915_READ(DPLL(pipe));
  1000. cur_state = !!(val & DPLL_VCO_ENABLE);
  1001. I915_STATE_WARN(cur_state != state,
  1002. "PLL state assertion failure (expected %s, current %s)\n",
  1003. onoff(state), onoff(cur_state));
  1004. }
  1005. /* XXX: the dsi pll is shared between MIPI DSI ports */
  1006. void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
  1007. {
  1008. u32 val;
  1009. bool cur_state;
  1010. mutex_lock(&dev_priv->sb_lock);
  1011. val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
  1012. mutex_unlock(&dev_priv->sb_lock);
  1013. cur_state = val & DSI_PLL_VCO_EN;
  1014. I915_STATE_WARN(cur_state != state,
  1015. "DSI PLL state assertion failure (expected %s, current %s)\n",
  1016. onoff(state), onoff(cur_state));
  1017. }
  1018. static void assert_fdi_tx(struct drm_i915_private *dev_priv,
  1019. enum pipe pipe, bool state)
  1020. {
  1021. bool cur_state;
  1022. enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1023. pipe);
  1024. if (HAS_DDI(dev_priv)) {
  1025. /* DDI does not have a specific FDI_TX register */
  1026. u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
  1027. cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
  1028. } else {
  1029. u32 val = I915_READ(FDI_TX_CTL(pipe));
  1030. cur_state = !!(val & FDI_TX_ENABLE);
  1031. }
  1032. I915_STATE_WARN(cur_state != state,
  1033. "FDI TX state assertion failure (expected %s, current %s)\n",
  1034. onoff(state), onoff(cur_state));
  1035. }
  1036. #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
  1037. #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
  1038. static void assert_fdi_rx(struct drm_i915_private *dev_priv,
  1039. enum pipe pipe, bool state)
  1040. {
  1041. u32 val;
  1042. bool cur_state;
  1043. val = I915_READ(FDI_RX_CTL(pipe));
  1044. cur_state = !!(val & FDI_RX_ENABLE);
  1045. I915_STATE_WARN(cur_state != state,
  1046. "FDI RX state assertion failure (expected %s, current %s)\n",
  1047. onoff(state), onoff(cur_state));
  1048. }
  1049. #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
  1050. #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
  1051. static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
  1052. enum pipe pipe)
  1053. {
  1054. u32 val;
  1055. /* ILK FDI PLL is always enabled */
  1056. if (INTEL_INFO(dev_priv)->gen == 5)
  1057. return;
  1058. /* On Haswell, DDI ports are responsible for the FDI PLL setup */
  1059. if (HAS_DDI(dev_priv))
  1060. return;
  1061. val = I915_READ(FDI_TX_CTL(pipe));
  1062. I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
  1063. }
  1064. void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
  1065. enum pipe pipe, bool state)
  1066. {
  1067. u32 val;
  1068. bool cur_state;
  1069. val = I915_READ(FDI_RX_CTL(pipe));
  1070. cur_state = !!(val & FDI_RX_PLL_ENABLE);
  1071. I915_STATE_WARN(cur_state != state,
  1072. "FDI RX PLL assertion failure (expected %s, current %s)\n",
  1073. onoff(state), onoff(cur_state));
  1074. }
  1075. void assert_panel_unlocked(struct drm_i915_private *dev_priv,
  1076. enum pipe pipe)
  1077. {
  1078. struct drm_device *dev = dev_priv->dev;
  1079. i915_reg_t pp_reg;
  1080. u32 val;
  1081. enum pipe panel_pipe = PIPE_A;
  1082. bool locked = true;
  1083. if (WARN_ON(HAS_DDI(dev)))
  1084. return;
  1085. if (HAS_PCH_SPLIT(dev)) {
  1086. u32 port_sel;
  1087. pp_reg = PCH_PP_CONTROL;
  1088. port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
  1089. if (port_sel == PANEL_PORT_SELECT_LVDS &&
  1090. I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
  1091. panel_pipe = PIPE_B;
  1092. /* XXX: else fix for eDP */
  1093. } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  1094. /* presumably write lock depends on pipe, not port select */
  1095. pp_reg = VLV_PIPE_PP_CONTROL(pipe);
  1096. panel_pipe = pipe;
  1097. } else {
  1098. pp_reg = PP_CONTROL;
  1099. if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
  1100. panel_pipe = PIPE_B;
  1101. }
  1102. val = I915_READ(pp_reg);
  1103. if (!(val & PANEL_POWER_ON) ||
  1104. ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
  1105. locked = false;
  1106. I915_STATE_WARN(panel_pipe == pipe && locked,
  1107. "panel assertion failure, pipe %c regs locked\n",
  1108. pipe_name(pipe));
  1109. }
  1110. static void assert_cursor(struct drm_i915_private *dev_priv,
  1111. enum pipe pipe, bool state)
  1112. {
  1113. struct drm_device *dev = dev_priv->dev;
  1114. bool cur_state;
  1115. if (IS_845G(dev) || IS_I865G(dev))
  1116. cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
  1117. else
  1118. cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  1119. I915_STATE_WARN(cur_state != state,
  1120. "cursor on pipe %c assertion failure (expected %s, current %s)\n",
  1121. pipe_name(pipe), onoff(state), onoff(cur_state));
  1122. }
  1123. #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
  1124. #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
  1125. void assert_pipe(struct drm_i915_private *dev_priv,
  1126. enum pipe pipe, bool state)
  1127. {
  1128. bool cur_state;
  1129. enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
  1130. pipe);
  1131. enum intel_display_power_domain power_domain;
1132. /* if we need the pipe quirk it must always be on */
  1133. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1134. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1135. state = true;
  1136. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  1137. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  1138. u32 val = I915_READ(PIPECONF(cpu_transcoder));
  1139. cur_state = !!(val & PIPECONF_ENABLE);
  1140. intel_display_power_put(dev_priv, power_domain);
  1141. } else {
  1142. cur_state = false;
  1143. }
  1144. I915_STATE_WARN(cur_state != state,
  1145. "pipe %c assertion failure (expected %s, current %s)\n",
  1146. pipe_name(pipe), onoff(state), onoff(cur_state));
  1147. }
  1148. static void assert_plane(struct drm_i915_private *dev_priv,
  1149. enum plane plane, bool state)
  1150. {
  1151. u32 val;
  1152. bool cur_state;
  1153. val = I915_READ(DSPCNTR(plane));
  1154. cur_state = !!(val & DISPLAY_PLANE_ENABLE);
  1155. I915_STATE_WARN(cur_state != state,
  1156. "plane %c assertion failure (expected %s, current %s)\n",
  1157. plane_name(plane), onoff(state), onoff(cur_state));
  1158. }
  1159. #define assert_plane_enabled(d, p) assert_plane(d, p, true)
  1160. #define assert_plane_disabled(d, p) assert_plane(d, p, false)
  1161. static void assert_planes_disabled(struct drm_i915_private *dev_priv,
  1162. enum pipe pipe)
  1163. {
  1164. struct drm_device *dev = dev_priv->dev;
  1165. int i;
  1166. /* Primary planes are fixed to pipes on gen4+ */
  1167. if (INTEL_INFO(dev)->gen >= 4) {
  1168. u32 val = I915_READ(DSPCNTR(pipe));
  1169. I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
  1170. "plane %c assertion failure, should be disabled but not\n",
  1171. plane_name(pipe));
  1172. return;
  1173. }
  1174. /* Need to check both planes against the pipe */
  1175. for_each_pipe(dev_priv, i) {
  1176. u32 val = I915_READ(DSPCNTR(i));
  1177. enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
  1178. DISPPLANE_SEL_PIPE_SHIFT;
  1179. I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
  1180. "plane %c assertion failure, should be off on pipe %c but is still active\n",
  1181. plane_name(i), pipe_name(pipe));
  1182. }
  1183. }
  1184. static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
  1185. enum pipe pipe)
  1186. {
  1187. struct drm_device *dev = dev_priv->dev;
  1188. int sprite;
  1189. if (INTEL_INFO(dev)->gen >= 9) {
  1190. for_each_sprite(dev_priv, pipe, sprite) {
  1191. u32 val = I915_READ(PLANE_CTL(pipe, sprite));
  1192. I915_STATE_WARN(val & PLANE_CTL_ENABLE,
  1193. "plane %d assertion failure, should be off on pipe %c but is still active\n",
  1194. sprite, pipe_name(pipe));
  1195. }
  1196. } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  1197. for_each_sprite(dev_priv, pipe, sprite) {
  1198. u32 val = I915_READ(SPCNTR(pipe, sprite));
  1199. I915_STATE_WARN(val & SP_ENABLE,
  1200. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1201. sprite_name(pipe, sprite), pipe_name(pipe));
  1202. }
  1203. } else if (INTEL_INFO(dev)->gen >= 7) {
  1204. u32 val = I915_READ(SPRCTL(pipe));
  1205. I915_STATE_WARN(val & SPRITE_ENABLE,
  1206. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1207. plane_name(pipe), pipe_name(pipe));
  1208. } else if (INTEL_INFO(dev)->gen >= 5) {
  1209. u32 val = I915_READ(DVSCNTR(pipe));
  1210. I915_STATE_WARN(val & DVS_ENABLE,
  1211. "sprite %c assertion failure, should be off on pipe %c but is still active\n",
  1212. plane_name(pipe), pipe_name(pipe));
  1213. }
  1214. }
  1215. static void assert_vblank_disabled(struct drm_crtc *crtc)
  1216. {
  1217. if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
  1218. drm_crtc_vblank_put(crtc);
  1219. }
  1220. void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
  1221. enum pipe pipe)
  1222. {
  1223. u32 val;
  1224. bool enabled;
  1225. val = I915_READ(PCH_TRANSCONF(pipe));
  1226. enabled = !!(val & TRANS_ENABLE);
  1227. I915_STATE_WARN(enabled,
  1228. "transcoder assertion failed, should be off on pipe %c but is still active\n",
  1229. pipe_name(pipe));
  1230. }
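/*
 * The *_pipe_enabled() helpers below decode a port register value and
 * report whether that port is enabled and currently routed to the given
 * pipe; the pipe/transcoder select bits differ between PCH generations
 * and platforms.
 */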
  1231. static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
  1232. enum pipe pipe, u32 port_sel, u32 val)
  1233. {
  1234. if ((val & DP_PORT_EN) == 0)
  1235. return false;
  1236. if (HAS_PCH_CPT(dev_priv)) {
  1237. u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
  1238. if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
  1239. return false;
  1240. } else if (IS_CHERRYVIEW(dev_priv)) {
  1241. if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
  1242. return false;
  1243. } else {
  1244. if ((val & DP_PIPE_MASK) != (pipe << 30))
  1245. return false;
  1246. }
  1247. return true;
  1248. }
  1249. static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
  1250. enum pipe pipe, u32 val)
  1251. {
  1252. if ((val & SDVO_ENABLE) == 0)
  1253. return false;
  1254. if (HAS_PCH_CPT(dev_priv)) {
  1255. if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
  1256. return false;
  1257. } else if (IS_CHERRYVIEW(dev_priv)) {
  1258. if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
  1259. return false;
  1260. } else {
  1261. if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
  1262. return false;
  1263. }
  1264. return true;
  1265. }
  1266. static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
  1267. enum pipe pipe, u32 val)
  1268. {
  1269. if ((val & LVDS_PORT_EN) == 0)
  1270. return false;
  1271. if (HAS_PCH_CPT(dev_priv)) {
  1272. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1273. return false;
  1274. } else {
  1275. if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
  1276. return false;
  1277. }
  1278. return true;
  1279. }
  1280. static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
  1281. enum pipe pipe, u32 val)
  1282. {
  1283. if ((val & ADPA_DAC_ENABLE) == 0)
  1284. return false;
  1285. if (HAS_PCH_CPT(dev_priv)) {
  1286. if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
  1287. return false;
  1288. } else {
  1289. if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
  1290. return false;
  1291. }
  1292. return true;
  1293. }
  1294. static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
  1295. enum pipe pipe, i915_reg_t reg,
  1296. u32 port_sel)
  1297. {
  1298. u32 val = I915_READ(reg);
  1299. I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
  1300. "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
  1301. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1302. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
  1303. && (val & DP_PIPEB_SELECT),
  1304. "IBX PCH dp port still using transcoder B\n");
  1305. }
  1306. static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
  1307. enum pipe pipe, i915_reg_t reg)
  1308. {
  1309. u32 val = I915_READ(reg);
  1310. I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
  1311. "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
  1312. i915_mmio_reg_offset(reg), pipe_name(pipe));
  1313. I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
  1314. && (val & SDVO_PIPE_B_SELECT),
  1315. "IBX PCH hdmi port still using transcoder B\n");
  1316. }
  1317. static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  1318. enum pipe pipe)
  1319. {
  1320. u32 val;
  1321. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
  1322. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
  1323. assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
  1324. val = I915_READ(PCH_ADPA);
  1325. I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
  1326. "PCH VGA enabled on transcoder %c, should be disabled\n",
  1327. pipe_name(pipe));
  1328. val = I915_READ(PCH_LVDS);
  1329. I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
  1330. "PCH LVDS enabled on transcoder %c, should be disabled\n",
  1331. pipe_name(pipe));
  1332. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
  1333. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
  1334. assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
  1335. }
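/* Program the DPLL and poll (up to 1 ms) for the VLV PLL lock bit. */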
  1336. static void _vlv_enable_pll(struct intel_crtc *crtc,
  1337. const struct intel_crtc_state *pipe_config)
  1338. {
  1339. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1340. enum pipe pipe = crtc->pipe;
  1341. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1342. POSTING_READ(DPLL(pipe));
  1343. udelay(150);
  1344. if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
  1345. DRM_ERROR("DPLL %d failed to lock\n", pipe);
  1346. }
  1347. static void vlv_enable_pll(struct intel_crtc *crtc,
  1348. const struct intel_crtc_state *pipe_config)
  1349. {
  1350. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1351. enum pipe pipe = crtc->pipe;
  1352. assert_pipe_disabled(dev_priv, pipe);
  1353. /* PLL is protected by panel, make sure we can write it */
  1354. assert_panel_unlocked(dev_priv, pipe);
  1355. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1356. _vlv_enable_pll(crtc, pipe_config);
  1357. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1358. POSTING_READ(DPLL_MD(pipe));
  1359. }
  1360. static void _chv_enable_pll(struct intel_crtc *crtc,
  1361. const struct intel_crtc_state *pipe_config)
  1362. {
  1363. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1364. enum pipe pipe = crtc->pipe;
  1365. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1366. u32 tmp;
  1367. mutex_lock(&dev_priv->sb_lock);
1368. /* Re-enable the 10-bit clock to the display controller */
  1369. tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1370. tmp |= DPIO_DCLKP_EN;
  1371. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
  1372. mutex_unlock(&dev_priv->sb_lock);
  1373. /*
  1374. * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
  1375. */
  1376. udelay(1);
  1377. /* Enable PLL */
  1378. I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
  1379. /* Check PLL is locked */
  1380. if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
  1381. DRM_ERROR("PLL %d failed to lock\n", pipe);
  1382. }
  1383. static void chv_enable_pll(struct intel_crtc *crtc,
  1384. const struct intel_crtc_state *pipe_config)
  1385. {
  1386. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  1387. enum pipe pipe = crtc->pipe;
  1388. assert_pipe_disabled(dev_priv, pipe);
  1389. /* PLL is protected by panel, make sure we can write it */
  1390. assert_panel_unlocked(dev_priv, pipe);
  1391. if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
  1392. _chv_enable_pll(crtc, pipe_config);
  1393. if (pipe != PIPE_A) {
  1394. /*
  1395. * WaPixelRepeatModeFixForC0:chv
  1396. *
  1397. * DPLLCMD is AWOL. Use chicken bits to propagate
  1398. * the value from DPLLBMD to either pipe B or C.
  1399. */
  1400. I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
  1401. I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
  1402. I915_WRITE(CBR4_VLV, 0);
  1403. dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
  1404. /*
  1405. * DPLLB VGA mode also seems to cause problems.
  1406. * We should always have it disabled.
  1407. */
  1408. WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
  1409. } else {
  1410. I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
  1411. POSTING_READ(DPLL_MD(pipe));
  1412. }
  1413. }
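/* Count the pipes that are currently active and driving a DVO output. */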
  1414. static int intel_num_dvo_pipes(struct drm_device *dev)
  1415. {
  1416. struct intel_crtc *crtc;
  1417. int count = 0;
  1418. for_each_intel_crtc(dev, crtc)
  1419. count += crtc->base.state->active &&
  1420. intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
  1421. return count;
  1422. }
  1423. static void i9xx_enable_pll(struct intel_crtc *crtc)
  1424. {
  1425. struct drm_device *dev = crtc->base.dev;
  1426. struct drm_i915_private *dev_priv = dev->dev_private;
  1427. i915_reg_t reg = DPLL(crtc->pipe);
  1428. u32 dpll = crtc->config->dpll_hw_state.dpll;
  1429. assert_pipe_disabled(dev_priv, crtc->pipe);
  1430. /* PLL is protected by panel, make sure we can write it */
  1431. if (IS_MOBILE(dev) && !IS_I830(dev))
  1432. assert_panel_unlocked(dev_priv, crtc->pipe);
  1433. /* Enable DVO 2x clock on both PLLs if necessary */
  1434. if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
  1435. /*
  1436. * It appears to be important that we don't enable this
  1437. * for the current pipe before otherwise configuring the
  1438. * PLL. No idea how this should be handled if multiple
1439. * DVO outputs are enabled simultaneously.
  1440. */
  1441. dpll |= DPLL_DVO_2X_MODE;
  1442. I915_WRITE(DPLL(!crtc->pipe),
  1443. I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
  1444. }
  1445. /*
  1446. * Apparently we need to have VGA mode enabled prior to changing
  1447. * the P1/P2 dividers. Otherwise the DPLL will keep using the old
  1448. * dividers, even though the register value does change.
  1449. */
  1450. I915_WRITE(reg, 0);
  1451. I915_WRITE(reg, dpll);
  1452. /* Wait for the clocks to stabilize. */
  1453. POSTING_READ(reg);
  1454. udelay(150);
  1455. if (INTEL_INFO(dev)->gen >= 4) {
  1456. I915_WRITE(DPLL_MD(crtc->pipe),
  1457. crtc->config->dpll_hw_state.dpll_md);
  1458. } else {
  1459. /* The pixel multiplier can only be updated once the
  1460. * DPLL is enabled and the clocks are stable.
  1461. *
  1462. * So write it again.
  1463. */
  1464. I915_WRITE(reg, dpll);
  1465. }
  1466. /* We do this three times for luck */
  1467. I915_WRITE(reg, dpll);
  1468. POSTING_READ(reg);
  1469. udelay(150); /* wait for warmup */
  1470. I915_WRITE(reg, dpll);
  1471. POSTING_READ(reg);
  1472. udelay(150); /* wait for warmup */
  1473. I915_WRITE(reg, dpll);
  1474. POSTING_READ(reg);
  1475. udelay(150); /* wait for warmup */
  1476. }
  1477. /**
  1478. * i9xx_disable_pll - disable a PLL
1479. * @crtc: crtc whose pipe PLL is to be disabled
1480. *
1481. * Disable the PLL for @crtc's pipe, making sure the pipe is off
1482. * first.
  1483. *
  1484. * Note! This is for pre-ILK only.
  1485. */
  1486. static void i9xx_disable_pll(struct intel_crtc *crtc)
  1487. {
  1488. struct drm_device *dev = crtc->base.dev;
  1489. struct drm_i915_private *dev_priv = dev->dev_private;
  1490. enum pipe pipe = crtc->pipe;
  1491. /* Disable DVO 2x clock on both PLLs if necessary */
  1492. if (IS_I830(dev) &&
  1493. intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
  1494. !intel_num_dvo_pipes(dev)) {
  1495. I915_WRITE(DPLL(PIPE_B),
  1496. I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
  1497. I915_WRITE(DPLL(PIPE_A),
  1498. I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
  1499. }
1500. /* Don't disable the pipe or its PLL if the pipe quirk needs them kept on */
  1501. if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1502. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1503. return;
  1504. /* Make sure the pipe isn't still relying on us */
  1505. assert_pipe_disabled(dev_priv, pipe);
  1506. I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
  1507. POSTING_READ(DPLL(pipe));
  1508. }
  1509. static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1510. {
  1511. u32 val;
  1512. /* Make sure the pipe isn't still relying on us */
  1513. assert_pipe_disabled(dev_priv, pipe);
  1514. val = DPLL_INTEGRATED_REF_CLK_VLV |
  1515. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1516. if (pipe != PIPE_A)
  1517. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1518. I915_WRITE(DPLL(pipe), val);
  1519. POSTING_READ(DPLL(pipe));
  1520. }
  1521. static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  1522. {
  1523. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  1524. u32 val;
  1525. /* Make sure the pipe isn't still relying on us */
  1526. assert_pipe_disabled(dev_priv, pipe);
  1527. val = DPLL_SSC_REF_CLK_CHV |
  1528. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  1529. if (pipe != PIPE_A)
  1530. val |= DPLL_INTEGRATED_CRI_CLK_VLV;
  1531. I915_WRITE(DPLL(pipe), val);
  1532. POSTING_READ(DPLL(pipe));
  1533. mutex_lock(&dev_priv->sb_lock);
  1534. /* Disable 10bit clock to display controller */
  1535. val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
  1536. val &= ~DPIO_DCLKP_EN;
  1537. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
  1538. mutex_unlock(&dev_priv->sb_lock);
  1539. }
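/*
 * Wait for the PHY lane readiness bits of the given port to reach
 * @expected_mask, warning if they have not done so within 1 second.
 */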
  1540. void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
  1541. struct intel_digital_port *dport,
  1542. unsigned int expected_mask)
  1543. {
  1544. u32 port_mask;
  1545. i915_reg_t dpll_reg;
  1546. switch (dport->port) {
  1547. case PORT_B:
  1548. port_mask = DPLL_PORTB_READY_MASK;
  1549. dpll_reg = DPLL(0);
  1550. break;
  1551. case PORT_C:
  1552. port_mask = DPLL_PORTC_READY_MASK;
  1553. dpll_reg = DPLL(0);
  1554. expected_mask <<= 4;
  1555. break;
  1556. case PORT_D:
  1557. port_mask = DPLL_PORTD_READY_MASK;
  1558. dpll_reg = DPIO_PHY_STATUS;
  1559. break;
  1560. default:
  1561. BUG();
  1562. }
  1563. if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
  1564. WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
  1565. port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
  1566. }
  1567. static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1568. enum pipe pipe)
  1569. {
  1570. struct drm_device *dev = dev_priv->dev;
  1571. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  1572. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  1573. i915_reg_t reg;
  1574. uint32_t val, pipeconf_val;
  1575. /* Make sure PCH DPLL is enabled */
  1576. assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
  1577. /* FDI must be feeding us bits for PCH ports */
  1578. assert_fdi_tx_enabled(dev_priv, pipe);
  1579. assert_fdi_rx_enabled(dev_priv, pipe);
  1580. if (HAS_PCH_CPT(dev)) {
  1581. /* Workaround: Set the timing override bit before enabling the
  1582. * pch transcoder. */
  1583. reg = TRANS_CHICKEN2(pipe);
  1584. val = I915_READ(reg);
  1585. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1586. I915_WRITE(reg, val);
  1587. }
  1588. reg = PCH_TRANSCONF(pipe);
  1589. val = I915_READ(reg);
  1590. pipeconf_val = I915_READ(PIPECONF(pipe));
  1591. if (HAS_PCH_IBX(dev_priv)) {
  1592. /*
1593. * Make the BPC in the transcoder consistent with
1594. * that in the pipeconf reg. For HDMI we must use
1595. * 8bpc here for both 8bpc and 12bpc.
  1596. */
  1597. val &= ~PIPECONF_BPC_MASK;
  1598. if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
  1599. val |= PIPECONF_8BPC;
  1600. else
  1601. val |= pipeconf_val & PIPECONF_BPC_MASK;
  1602. }
  1603. val &= ~TRANS_INTERLACE_MASK;
1604. if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
  1605. if (HAS_PCH_IBX(dev_priv) &&
  1606. intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
  1607. val |= TRANS_LEGACY_INTERLACED_ILK;
  1608. else
  1609. val |= TRANS_INTERLACED;
1610. } else
  1611. val |= TRANS_PROGRESSIVE;
  1612. I915_WRITE(reg, val | TRANS_ENABLE);
  1613. if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
  1614. DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
  1615. }
  1616. static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
  1617. enum transcoder cpu_transcoder)
  1618. {
  1619. u32 val, pipeconf_val;
  1620. /* FDI must be feeding us bits for PCH ports */
  1621. assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
  1622. assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
  1623. /* Workaround: set timing override bit. */
  1624. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1625. val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
  1626. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1627. val = TRANS_ENABLE;
  1628. pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
  1629. if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
  1630. PIPECONF_INTERLACED_ILK)
  1631. val |= TRANS_INTERLACED;
  1632. else
  1633. val |= TRANS_PROGRESSIVE;
  1634. I915_WRITE(LPT_TRANSCONF, val);
  1635. if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
  1636. DRM_ERROR("Failed to enable PCH transcoder\n");
  1637. }
  1638. static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
  1639. enum pipe pipe)
  1640. {
  1641. struct drm_device *dev = dev_priv->dev;
  1642. i915_reg_t reg;
  1643. uint32_t val;
  1644. /* FDI relies on the transcoder */
  1645. assert_fdi_tx_disabled(dev_priv, pipe);
  1646. assert_fdi_rx_disabled(dev_priv, pipe);
  1647. /* Ports must be off as well */
  1648. assert_pch_ports_disabled(dev_priv, pipe);
  1649. reg = PCH_TRANSCONF(pipe);
  1650. val = I915_READ(reg);
  1651. val &= ~TRANS_ENABLE;
  1652. I915_WRITE(reg, val);
  1653. /* wait for PCH transcoder off, transcoder state */
  1654. if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
  1655. DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
  1656. if (HAS_PCH_CPT(dev)) {
  1657. /* Workaround: Clear the timing override chicken bit again. */
  1658. reg = TRANS_CHICKEN2(pipe);
  1659. val = I915_READ(reg);
  1660. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1661. I915_WRITE(reg, val);
  1662. }
  1663. }
  1664. static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  1665. {
  1666. u32 val;
  1667. val = I915_READ(LPT_TRANSCONF);
  1668. val &= ~TRANS_ENABLE;
  1669. I915_WRITE(LPT_TRANSCONF, val);
  1670. /* wait for PCH transcoder off, transcoder state */
  1671. if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
  1672. DRM_ERROR("Failed to disable PCH transcoder\n");
  1673. /* Workaround: clear timing override bit. */
  1674. val = I915_READ(TRANS_CHICKEN2(PIPE_A));
  1675. val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
  1676. I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
  1677. }
  1678. /**
  1679. * intel_enable_pipe - enable a pipe, asserting requirements
  1680. * @crtc: crtc responsible for the pipe
  1681. *
  1682. * Enable @crtc's pipe, making sure that various hardware specific requirements
  1683. * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
  1684. */
  1685. static void intel_enable_pipe(struct intel_crtc *crtc)
  1686. {
  1687. struct drm_device *dev = crtc->base.dev;
  1688. struct drm_i915_private *dev_priv = dev->dev_private;
  1689. enum pipe pipe = crtc->pipe;
  1690. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1691. enum pipe pch_transcoder;
  1692. i915_reg_t reg;
  1693. u32 val;
  1694. DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
  1695. assert_planes_disabled(dev_priv, pipe);
  1696. assert_cursor_disabled(dev_priv, pipe);
  1697. assert_sprites_disabled(dev_priv, pipe);
  1698. if (HAS_PCH_LPT(dev_priv))
  1699. pch_transcoder = TRANSCODER_A;
  1700. else
  1701. pch_transcoder = pipe;
  1702. /*
  1703. * A pipe without a PLL won't actually be able to drive bits from
  1704. * a plane. On ILK+ the pipe PLLs are integrated, so we don't
  1705. * need the check.
  1706. */
1707. if (HAS_GMCH_DISPLAY(dev_priv)) {
  1708. if (crtc->config->has_dsi_encoder)
  1709. assert_dsi_pll_enabled(dev_priv);
  1710. else
  1711. assert_pll_enabled(dev_priv, pipe);
1712. } else {
  1713. if (crtc->config->has_pch_encoder) {
  1714. /* if driving the PCH, we need FDI enabled */
  1715. assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
  1716. assert_fdi_tx_pll_enabled(dev_priv,
  1717. (enum pipe) cpu_transcoder);
  1718. }
  1719. /* FIXME: assert CPU port conditions for SNB+ */
  1720. }
  1721. reg = PIPECONF(cpu_transcoder);
  1722. val = I915_READ(reg);
  1723. if (val & PIPECONF_ENABLE) {
  1724. WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  1725. (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
  1726. return;
  1727. }
  1728. I915_WRITE(reg, val | PIPECONF_ENABLE);
  1729. POSTING_READ(reg);
  1730. /*
  1731. * Until the pipe starts DSL will read as 0, which would cause
  1732. * an apparent vblank timestamp jump, which messes up also the
  1733. * frame count when it's derived from the timestamps. So let's
  1734. * wait for the pipe to start properly before we call
  1735. * drm_crtc_vblank_on()
  1736. */
  1737. if (dev->max_vblank_count == 0 &&
  1738. wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
  1739. DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
  1740. }
  1741. /**
  1742. * intel_disable_pipe - disable a pipe, asserting requirements
1743. * @crtc: crtc whose pipe is to be disabled
  1744. *
  1745. * Disable the pipe of @crtc, making sure that various hardware
  1746. * specific requirements are met, if applicable, e.g. plane
  1747. * disabled, panel fitter off, etc.
  1748. *
  1749. * Will wait until the pipe has shut down before returning.
  1750. */
  1751. static void intel_disable_pipe(struct intel_crtc *crtc)
  1752. {
  1753. struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
  1754. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  1755. enum pipe pipe = crtc->pipe;
  1756. i915_reg_t reg;
  1757. u32 val;
  1758. DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
  1759. /*
  1760. * Make sure planes won't keep trying to pump pixels to us,
  1761. * or we might hang the display.
  1762. */
  1763. assert_planes_disabled(dev_priv, pipe);
  1764. assert_cursor_disabled(dev_priv, pipe);
  1765. assert_sprites_disabled(dev_priv, pipe);
  1766. reg = PIPECONF(cpu_transcoder);
  1767. val = I915_READ(reg);
  1768. if ((val & PIPECONF_ENABLE) == 0)
  1769. return;
  1770. /*
  1771. * Double wide has implications for planes
  1772. * so best keep it disabled when not needed.
  1773. */
  1774. if (crtc->config->double_wide)
  1775. val &= ~PIPECONF_DOUBLE_WIDE;
1776. /* Don't disable the pipe or its PLL if the pipe quirk needs them kept on */
  1777. if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
  1778. !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  1779. val &= ~PIPECONF_ENABLE;
  1780. I915_WRITE(reg, val);
  1781. if ((val & PIPECONF_ENABLE) == 0)
  1782. intel_wait_for_pipe_off(crtc);
  1783. }
  1784. static bool need_vtd_wa(struct drm_device *dev)
  1785. {
  1786. #ifdef CONFIG_INTEL_IOMMU
  1787. if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
  1788. return true;
  1789. #endif
  1790. return false;
  1791. }
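/* GTT tile size in bytes: 2 KiB on gen2, 4 KiB on everything newer. */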
  1792. static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
  1793. {
  1794. return IS_GEN2(dev_priv) ? 2048 : 4096;
  1795. }
  1796. static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
  1797. uint64_t fb_modifier, unsigned int cpp)
  1798. {
  1799. switch (fb_modifier) {
  1800. case DRM_FORMAT_MOD_NONE:
  1801. return cpp;
  1802. case I915_FORMAT_MOD_X_TILED:
  1803. if (IS_GEN2(dev_priv))
  1804. return 128;
  1805. else
  1806. return 512;
  1807. case I915_FORMAT_MOD_Y_TILED:
  1808. if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
  1809. return 128;
  1810. else
  1811. return 512;
  1812. case I915_FORMAT_MOD_Yf_TILED:
  1813. switch (cpp) {
  1814. case 1:
  1815. return 64;
  1816. case 2:
  1817. case 4:
  1818. return 128;
  1819. case 8:
  1820. case 16:
  1821. return 256;
  1822. default:
  1823. MISSING_CASE(cpp);
  1824. return cpp;
  1825. }
  1826. break;
  1827. default:
  1828. MISSING_CASE(fb_modifier);
  1829. return cpp;
  1830. }
  1831. }
  1832. unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
  1833. uint64_t fb_modifier, unsigned int cpp)
  1834. {
  1835. if (fb_modifier == DRM_FORMAT_MOD_NONE)
  1836. return 1;
  1837. else
  1838. return intel_tile_size(dev_priv) /
  1839. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1840. }
  1841. /* Return the tile dimensions in pixel units */
  1842. static void intel_tile_dims(const struct drm_i915_private *dev_priv,
  1843. unsigned int *tile_width,
  1844. unsigned int *tile_height,
  1845. uint64_t fb_modifier,
  1846. unsigned int cpp)
  1847. {
  1848. unsigned int tile_width_bytes =
  1849. intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  1850. *tile_width = tile_width_bytes / cpp;
  1851. *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
  1852. }
  1853. unsigned int
  1854. intel_fb_align_height(struct drm_device *dev, unsigned int height,
  1855. uint32_t pixel_format, uint64_t fb_modifier)
  1856. {
  1857. unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
  1858. unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
  1859. return ALIGN(height, tile_height);
  1860. }
  1861. unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
  1862. {
  1863. unsigned int size = 0;
  1864. int i;
  1865. for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
  1866. size += rot_info->plane[i].width * rot_info->plane[i].height;
  1867. return size;
  1868. }
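/* Pick the normal or rotated GGTT view for a framebuffer based on the plane rotation. */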
  1869. static void
  1870. intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
  1871. const struct drm_framebuffer *fb,
  1872. unsigned int rotation)
  1873. {
  1874. if (intel_rotation_90_or_270(rotation)) {
  1875. *view = i915_ggtt_view_rotated;
  1876. view->params.rotated = to_intel_framebuffer(fb)->rot_info;
  1877. } else {
  1878. *view = i915_ggtt_view_normal;
  1879. }
  1880. }
  1881. static void
  1882. intel_fill_fb_info(struct drm_i915_private *dev_priv,
  1883. struct drm_framebuffer *fb)
  1884. {
  1885. struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
  1886. unsigned int tile_size, tile_width, tile_height, cpp;
  1887. tile_size = intel_tile_size(dev_priv);
  1888. cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  1889. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  1890. fb->modifier[0], cpp);
  1891. info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
  1892. info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
  1893. if (info->pixel_format == DRM_FORMAT_NV12) {
  1894. cpp = drm_format_plane_cpp(fb->pixel_format, 1);
  1895. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  1896. fb->modifier[1], cpp);
  1897. info->uv_offset = fb->offsets[1];
  1898. info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
  1899. info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
  1900. }
  1901. }
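/* Minimum GGTT alignment required for a linear (untiled) scanout surface. */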
  1902. static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
  1903. {
  1904. if (INTEL_INFO(dev_priv)->gen >= 9)
  1905. return 256 * 1024;
  1906. else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
  1907. IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
  1908. return 128 * 1024;
  1909. else if (INTEL_INFO(dev_priv)->gen >= 4)
  1910. return 4 * 1024;
  1911. else
  1912. return 0;
  1913. }
  1914. static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
  1915. uint64_t fb_modifier)
  1916. {
  1917. switch (fb_modifier) {
  1918. case DRM_FORMAT_MOD_NONE:
  1919. return intel_linear_alignment(dev_priv);
  1920. case I915_FORMAT_MOD_X_TILED:
  1921. if (INTEL_INFO(dev_priv)->gen >= 9)
  1922. return 256 * 1024;
  1923. return 0;
  1924. case I915_FORMAT_MOD_Y_TILED:
  1925. case I915_FORMAT_MOD_Yf_TILED:
  1926. return 1 * 1024 * 1024;
  1927. default:
  1928. MISSING_CASE(fb_modifier);
  1929. return 0;
  1930. }
  1931. }
  1932. int
  1933. intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
  1934. unsigned int rotation)
  1935. {
  1936. struct drm_device *dev = fb->dev;
  1937. struct drm_i915_private *dev_priv = dev->dev_private;
  1938. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1939. struct i915_ggtt_view view;
  1940. u32 alignment;
  1941. int ret;
  1942. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  1943. alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
  1944. intel_fill_fb_ggtt_view(&view, fb, rotation);
  1945. /* Note that the w/a also requires 64 PTE of padding following the
  1946. * bo. We currently fill all unused PTE with the shadow page and so
  1947. * we should always have valid PTE following the scanout preventing
  1948. * the VT-d warning.
  1949. */
  1950. if (need_vtd_wa(dev) && alignment < 256 * 1024)
  1951. alignment = 256 * 1024;
  1952. /*
  1953. * Global gtt pte registers are special registers which actually forward
1954. * writes to a chunk of system memory, which means that there is no risk
  1955. * that the register values disappear as soon as we call
  1956. * intel_runtime_pm_put(), so it is correct to wrap only the
  1957. * pin/unpin/fence and not more.
  1958. */
  1959. intel_runtime_pm_get(dev_priv);
  1960. ret = i915_gem_object_pin_to_display_plane(obj, alignment,
  1961. &view);
  1962. if (ret)
  1963. goto err_pm;
  1964. /* Install a fence for tiled scan-out. Pre-i965 always needs a
  1965. * fence, whereas 965+ only requires a fence if using
  1966. * framebuffer compression. For simplicity, we always install
  1967. * a fence as the cost is not that onerous.
  1968. */
  1969. if (view.type == I915_GGTT_VIEW_NORMAL) {
  1970. ret = i915_gem_object_get_fence(obj);
  1971. if (ret == -EDEADLK) {
  1972. /*
1973. * -EDEADLK means there are no free fences and
1974. * no pending flips.
  1975. *
  1976. * This is propagated to atomic, but it uses
  1977. * -EDEADLK to force a locking recovery, so
  1978. * change the returned error to -EBUSY.
  1979. */
  1980. ret = -EBUSY;
  1981. goto err_unpin;
  1982. } else if (ret)
  1983. goto err_unpin;
  1984. i915_gem_object_pin_fence(obj);
  1985. }
  1986. intel_runtime_pm_put(dev_priv);
  1987. return 0;
  1988. err_unpin:
  1989. i915_gem_object_unpin_from_display_plane(obj, &view);
  1990. err_pm:
  1991. intel_runtime_pm_put(dev_priv);
  1992. return ret;
  1993. }
  1994. static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  1995. {
  1996. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  1997. struct i915_ggtt_view view;
  1998. WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
  1999. intel_fill_fb_ggtt_view(&view, fb, rotation);
  2000. if (view.type == I915_GGTT_VIEW_NORMAL)
  2001. i915_gem_object_unpin_fence(obj);
  2002. i915_gem_object_unpin_from_display_plane(obj, &view);
  2003. }
  2004. /*
  2005. * Adjust the tile offset by moving the difference into
  2006. * the x/y offsets.
  2007. *
  2008. * Input tile dimensions and pitch must already be
  2009. * rotated to match x and y, and in pixel units.
  2010. */
  2011. static u32 intel_adjust_tile_offset(int *x, int *y,
  2012. unsigned int tile_width,
  2013. unsigned int tile_height,
  2014. unsigned int tile_size,
  2015. unsigned int pitch_tiles,
  2016. u32 old_offset,
  2017. u32 new_offset)
  2018. {
  2019. unsigned int tiles;
  2020. WARN_ON(old_offset & (tile_size - 1));
  2021. WARN_ON(new_offset & (tile_size - 1));
  2022. WARN_ON(new_offset > old_offset);
  2023. tiles = (old_offset - new_offset) / tile_size;
  2024. *y += tiles / pitch_tiles * tile_height;
  2025. *x += tiles % pitch_tiles * tile_width;
  2026. return new_offset;
  2027. }
  2028. /*
  2029. * Computes the linear offset to the base tile and adjusts
  2030. * x, y. bytes per pixel is assumed to be a power-of-two.
  2031. *
  2032. * In the 90/270 rotated case, x and y are assumed
  2033. * to be already rotated to match the rotated GTT view, and
  2034. * pitch is the tile_height aligned framebuffer height.
  2035. */
  2036. u32 intel_compute_tile_offset(int *x, int *y,
  2037. const struct drm_framebuffer *fb, int plane,
  2038. unsigned int pitch,
  2039. unsigned int rotation)
  2040. {
  2041. const struct drm_i915_private *dev_priv = to_i915(fb->dev);
  2042. uint64_t fb_modifier = fb->modifier[plane];
  2043. unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
  2044. u32 offset, offset_aligned, alignment;
  2045. alignment = intel_surf_alignment(dev_priv, fb_modifier);
  2046. if (alignment)
  2047. alignment--;
  2048. if (fb_modifier != DRM_FORMAT_MOD_NONE) {
  2049. unsigned int tile_size, tile_width, tile_height;
  2050. unsigned int tile_rows, tiles, pitch_tiles;
  2051. tile_size = intel_tile_size(dev_priv);
  2052. intel_tile_dims(dev_priv, &tile_width, &tile_height,
  2053. fb_modifier, cpp);
  2054. if (intel_rotation_90_or_270(rotation)) {
  2055. pitch_tiles = pitch / tile_height;
  2056. swap(tile_width, tile_height);
  2057. } else {
  2058. pitch_tiles = pitch / (tile_width * cpp);
  2059. }
  2060. tile_rows = *y / tile_height;
  2061. *y %= tile_height;
  2062. tiles = *x / tile_width;
  2063. *x %= tile_width;
  2064. offset = (tile_rows * pitch_tiles + tiles) * tile_size;
  2065. offset_aligned = offset & ~alignment;
  2066. intel_adjust_tile_offset(x, y, tile_width, tile_height,
  2067. tile_size, pitch_tiles,
  2068. offset, offset_aligned);
  2069. } else {
  2070. offset = *y * pitch + *x * cpp;
  2071. offset_aligned = offset & ~alignment;
  2072. *y = (offset & alignment) / pitch;
  2073. *x = ((offset & alignment) - *y * pitch) / cpp;
  2074. }
  2075. return offset_aligned;
  2076. }
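/* Translate a DSPCNTR pixel format field into the matching DRM fourcc. */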
  2077. static int i9xx_format_to_fourcc(int format)
  2078. {
  2079. switch (format) {
  2080. case DISPPLANE_8BPP:
  2081. return DRM_FORMAT_C8;
  2082. case DISPPLANE_BGRX555:
  2083. return DRM_FORMAT_XRGB1555;
  2084. case DISPPLANE_BGRX565:
  2085. return DRM_FORMAT_RGB565;
  2086. default:
  2087. case DISPPLANE_BGRX888:
  2088. return DRM_FORMAT_XRGB8888;
  2089. case DISPPLANE_RGBX888:
  2090. return DRM_FORMAT_XBGR8888;
  2091. case DISPPLANE_BGRX101010:
  2092. return DRM_FORMAT_XRGB2101010;
  2093. case DISPPLANE_RGBX101010:
  2094. return DRM_FORMAT_XBGR2101010;
  2095. }
  2096. }
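/* Translate a SKL+ PLANE_CTL format (plus RGB order/alpha) into a DRM fourcc. */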
  2097. static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
  2098. {
  2099. switch (format) {
  2100. case PLANE_CTL_FORMAT_RGB_565:
  2101. return DRM_FORMAT_RGB565;
  2102. default:
  2103. case PLANE_CTL_FORMAT_XRGB_8888:
  2104. if (rgb_order) {
  2105. if (alpha)
  2106. return DRM_FORMAT_ABGR8888;
  2107. else
  2108. return DRM_FORMAT_XBGR8888;
  2109. } else {
  2110. if (alpha)
  2111. return DRM_FORMAT_ARGB8888;
  2112. else
  2113. return DRM_FORMAT_XRGB8888;
  2114. }
  2115. case PLANE_CTL_FORMAT_XRGB_2101010:
  2116. if (rgb_order)
  2117. return DRM_FORMAT_XBGR2101010;
  2118. else
  2119. return DRM_FORMAT_XRGB2101010;
  2120. }
  2121. }
  2122. static bool
  2123. intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
  2124. struct intel_initial_plane_config *plane_config)
  2125. {
  2126. struct drm_device *dev = crtc->base.dev;
  2127. struct drm_i915_private *dev_priv = to_i915(dev);
  2128. struct i915_ggtt *ggtt = &dev_priv->ggtt;
  2129. struct drm_i915_gem_object *obj = NULL;
  2130. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  2131. struct drm_framebuffer *fb = &plane_config->fb->base;
  2132. u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
  2133. u32 size_aligned = round_up(plane_config->base + plane_config->size,
  2134. PAGE_SIZE);
  2135. size_aligned -= base_aligned;
  2136. if (plane_config->size == 0)
  2137. return false;
  2138. /* If the FB is too big, just don't use it since fbdev is not very
  2139. * important and we should probably use that space with FBC or other
  2140. * features. */
  2141. if (size_aligned * 2 > ggtt->stolen_usable_size)
  2142. return false;
  2143. mutex_lock(&dev->struct_mutex);
  2144. obj = i915_gem_object_create_stolen_for_preallocated(dev,
  2145. base_aligned,
  2146. base_aligned,
  2147. size_aligned);
  2148. if (!obj) {
  2149. mutex_unlock(&dev->struct_mutex);
  2150. return false;
  2151. }
  2152. obj->tiling_mode = plane_config->tiling;
  2153. if (obj->tiling_mode == I915_TILING_X)
  2154. obj->stride = fb->pitches[0];
  2155. mode_cmd.pixel_format = fb->pixel_format;
  2156. mode_cmd.width = fb->width;
  2157. mode_cmd.height = fb->height;
  2158. mode_cmd.pitches[0] = fb->pitches[0];
  2159. mode_cmd.modifier[0] = fb->modifier[0];
  2160. mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
  2161. if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
  2162. &mode_cmd, obj)) {
  2163. DRM_DEBUG_KMS("intel fb init failed\n");
  2164. goto out_unref_obj;
  2165. }
  2166. mutex_unlock(&dev->struct_mutex);
  2167. DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
  2168. return true;
  2169. out_unref_obj:
  2170. drm_gem_object_unreference(&obj->base);
  2171. mutex_unlock(&dev->struct_mutex);
  2172. return false;
  2173. }
  2174. /* Update plane->state->fb to match plane->fb after driver-internal updates */
  2175. static void
  2176. update_state_fb(struct drm_plane *plane)
  2177. {
  2178. if (plane->fb == plane->state->fb)
  2179. return;
  2180. if (plane->state->fb)
  2181. drm_framebuffer_unreference(plane->state->fb);
  2182. plane->state->fb = plane->fb;
  2183. if (plane->state->fb)
  2184. drm_framebuffer_reference(plane->state->fb);
  2185. }
  2186. static void
  2187. intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
  2188. struct intel_initial_plane_config *plane_config)
  2189. {
  2190. struct drm_device *dev = intel_crtc->base.dev;
  2191. struct drm_i915_private *dev_priv = dev->dev_private;
  2192. struct drm_crtc *c;
  2193. struct intel_crtc *i;
  2194. struct drm_i915_gem_object *obj;
  2195. struct drm_plane *primary = intel_crtc->base.primary;
  2196. struct drm_plane_state *plane_state = primary->state;
  2197. struct drm_crtc_state *crtc_state = intel_crtc->base.state;
  2198. struct intel_plane *intel_plane = to_intel_plane(primary);
  2199. struct intel_plane_state *intel_state =
  2200. to_intel_plane_state(plane_state);
  2201. struct drm_framebuffer *fb;
  2202. if (!plane_config->fb)
  2203. return;
  2204. if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
  2205. fb = &plane_config->fb->base;
  2206. goto valid_fb;
  2207. }
  2208. kfree(plane_config->fb);
  2209. /*
  2210. * Failed to alloc the obj, check to see if we should share
  2211. * an fb with another CRTC instead
  2212. */
  2213. for_each_crtc(dev, c) {
  2214. i = to_intel_crtc(c);
  2215. if (c == &intel_crtc->base)
  2216. continue;
  2217. if (!i->active)
  2218. continue;
  2219. fb = c->primary->fb;
  2220. if (!fb)
  2221. continue;
  2222. obj = intel_fb_obj(fb);
  2223. if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
  2224. drm_framebuffer_reference(fb);
  2225. goto valid_fb;
  2226. }
  2227. }
  2228. /*
  2229. * We've failed to reconstruct the BIOS FB. Current display state
  2230. * indicates that the primary plane is visible, but has a NULL FB,
  2231. * which will lead to problems later if we don't fix it up. The
  2232. * simplest solution is to just disable the primary plane now and
  2233. * pretend the BIOS never had it enabled.
  2234. */
  2235. to_intel_plane_state(plane_state)->visible = false;
  2236. crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
  2237. intel_pre_disable_primary_noatomic(&intel_crtc->base);
  2238. intel_plane->disable_plane(primary, &intel_crtc->base);
  2239. return;
  2240. valid_fb:
  2241. plane_state->src_x = 0;
  2242. plane_state->src_y = 0;
  2243. plane_state->src_w = fb->width << 16;
  2244. plane_state->src_h = fb->height << 16;
  2245. plane_state->crtc_x = 0;
  2246. plane_state->crtc_y = 0;
  2247. plane_state->crtc_w = fb->width;
  2248. plane_state->crtc_h = fb->height;
  2249. intel_state->src.x1 = plane_state->src_x;
  2250. intel_state->src.y1 = plane_state->src_y;
  2251. intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
  2252. intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
  2253. intel_state->dst.x1 = plane_state->crtc_x;
  2254. intel_state->dst.y1 = plane_state->crtc_y;
  2255. intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
  2256. intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
  2257. obj = intel_fb_obj(fb);
  2258. if (obj->tiling_mode != I915_TILING_NONE)
  2259. dev_priv->preserve_bios_swizzle = true;
  2260. drm_framebuffer_reference(fb);
  2261. primary->fb = primary->state->fb = fb;
  2262. primary->crtc = primary->state->crtc = &intel_crtc->base;
  2263. intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
  2264. obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
  2265. }
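/*
 * Program the i9xx-style primary plane registers (DSPCNTR and friends)
 * from the given crtc and plane state: pixel format, tiling, rotation
 * and the computed surface/linear offsets.
 */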
  2266. static void i9xx_update_primary_plane(struct drm_plane *primary,
  2267. const struct intel_crtc_state *crtc_state,
  2268. const struct intel_plane_state *plane_state)
  2269. {
  2270. struct drm_device *dev = primary->dev;
  2271. struct drm_i915_private *dev_priv = dev->dev_private;
  2272. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2273. struct drm_framebuffer *fb = plane_state->base.fb;
  2274. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2275. int plane = intel_crtc->plane;
  2276. u32 linear_offset;
  2277. u32 dspcntr;
  2278. i915_reg_t reg = DSPCNTR(plane);
  2279. unsigned int rotation = plane_state->base.rotation;
  2280. int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  2281. int x = plane_state->src.x1 >> 16;
  2282. int y = plane_state->src.y1 >> 16;
  2283. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2284. dspcntr |= DISPLAY_PLANE_ENABLE;
  2285. if (INTEL_INFO(dev)->gen < 4) {
  2286. if (intel_crtc->pipe == PIPE_B)
  2287. dspcntr |= DISPPLANE_SEL_PIPE_B;
  2288. /* pipesrc and dspsize control the size that is scaled from,
  2289. * which should always be the user's requested size.
  2290. */
  2291. I915_WRITE(DSPSIZE(plane),
  2292. ((crtc_state->pipe_src_h - 1) << 16) |
  2293. (crtc_state->pipe_src_w - 1));
  2294. I915_WRITE(DSPPOS(plane), 0);
  2295. } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
  2296. I915_WRITE(PRIMSIZE(plane),
  2297. ((crtc_state->pipe_src_h - 1) << 16) |
  2298. (crtc_state->pipe_src_w - 1));
  2299. I915_WRITE(PRIMPOS(plane), 0);
  2300. I915_WRITE(PRIMCNSTALPHA(plane), 0);
  2301. }
  2302. switch (fb->pixel_format) {
  2303. case DRM_FORMAT_C8:
  2304. dspcntr |= DISPPLANE_8BPP;
  2305. break;
  2306. case DRM_FORMAT_XRGB1555:
  2307. dspcntr |= DISPPLANE_BGRX555;
  2308. break;
  2309. case DRM_FORMAT_RGB565:
  2310. dspcntr |= DISPPLANE_BGRX565;
  2311. break;
  2312. case DRM_FORMAT_XRGB8888:
  2313. dspcntr |= DISPPLANE_BGRX888;
  2314. break;
  2315. case DRM_FORMAT_XBGR8888:
  2316. dspcntr |= DISPPLANE_RGBX888;
  2317. break;
  2318. case DRM_FORMAT_XRGB2101010:
  2319. dspcntr |= DISPPLANE_BGRX101010;
  2320. break;
  2321. case DRM_FORMAT_XBGR2101010:
  2322. dspcntr |= DISPPLANE_RGBX101010;
  2323. break;
  2324. default:
  2325. BUG();
  2326. }
  2327. if (INTEL_INFO(dev)->gen >= 4 &&
  2328. obj->tiling_mode != I915_TILING_NONE)
  2329. dspcntr |= DISPPLANE_TILED;
  2330. if (IS_G4X(dev))
  2331. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2332. linear_offset = y * fb->pitches[0] + x * cpp;
  2333. if (INTEL_INFO(dev)->gen >= 4) {
  2334. intel_crtc->dspaddr_offset =
  2335. intel_compute_tile_offset(&x, &y, fb, 0,
  2336. fb->pitches[0], rotation);
  2337. linear_offset -= intel_crtc->dspaddr_offset;
  2338. } else {
  2339. intel_crtc->dspaddr_offset = linear_offset;
  2340. }
  2341. if (rotation == BIT(DRM_ROTATE_180)) {
  2342. dspcntr |= DISPPLANE_ROTATE_180;
  2343. x += (crtc_state->pipe_src_w - 1);
  2344. y += (crtc_state->pipe_src_h - 1);
2345. /* Find the last pixel of the last line of the display
2346. data and add it to linear_offset. */
  2347. linear_offset +=
  2348. (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
  2349. (crtc_state->pipe_src_w - 1) * cpp;
  2350. }
  2351. intel_crtc->adjusted_x = x;
  2352. intel_crtc->adjusted_y = y;
  2353. I915_WRITE(reg, dspcntr);
  2354. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2355. if (INTEL_INFO(dev)->gen >= 4) {
  2356. I915_WRITE(DSPSURF(plane),
  2357. i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
  2358. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2359. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2360. } else
  2361. I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
  2362. POSTING_READ(reg);
  2363. }
  2364. static void i9xx_disable_primary_plane(struct drm_plane *primary,
  2365. struct drm_crtc *crtc)
  2366. {
  2367. struct drm_device *dev = crtc->dev;
  2368. struct drm_i915_private *dev_priv = dev->dev_private;
  2369. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2370. int plane = intel_crtc->plane;
  2371. I915_WRITE(DSPCNTR(plane), 0);
  2372. if (INTEL_INFO(dev_priv)->gen >= 4)
  2373. I915_WRITE(DSPSURF(plane), 0);
  2374. else
  2375. I915_WRITE(DSPADDR(plane), 0);
  2376. POSTING_READ(DSPCNTR(plane));
  2377. }
  2378. static void ironlake_update_primary_plane(struct drm_plane *primary,
  2379. const struct intel_crtc_state *crtc_state,
  2380. const struct intel_plane_state *plane_state)
  2381. {
  2382. struct drm_device *dev = primary->dev;
  2383. struct drm_i915_private *dev_priv = dev->dev_private;
  2384. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2385. struct drm_framebuffer *fb = plane_state->base.fb;
  2386. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2387. int plane = intel_crtc->plane;
  2388. u32 linear_offset;
  2389. u32 dspcntr;
  2390. i915_reg_t reg = DSPCNTR(plane);
  2391. unsigned int rotation = plane_state->base.rotation;
  2392. int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  2393. int x = plane_state->src.x1 >> 16;
  2394. int y = plane_state->src.y1 >> 16;
  2395. dspcntr = DISPPLANE_GAMMA_ENABLE;
  2396. dspcntr |= DISPLAY_PLANE_ENABLE;
  2397. if (IS_HASWELL(dev) || IS_BROADWELL(dev))
  2398. dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
  2399. switch (fb->pixel_format) {
  2400. case DRM_FORMAT_C8:
  2401. dspcntr |= DISPPLANE_8BPP;
  2402. break;
  2403. case DRM_FORMAT_RGB565:
  2404. dspcntr |= DISPPLANE_BGRX565;
  2405. break;
  2406. case DRM_FORMAT_XRGB8888:
  2407. dspcntr |= DISPPLANE_BGRX888;
  2408. break;
  2409. case DRM_FORMAT_XBGR8888:
  2410. dspcntr |= DISPPLANE_RGBX888;
  2411. break;
  2412. case DRM_FORMAT_XRGB2101010:
  2413. dspcntr |= DISPPLANE_BGRX101010;
  2414. break;
  2415. case DRM_FORMAT_XBGR2101010:
  2416. dspcntr |= DISPPLANE_RGBX101010;
  2417. break;
  2418. default:
  2419. BUG();
  2420. }
  2421. if (obj->tiling_mode != I915_TILING_NONE)
  2422. dspcntr |= DISPPLANE_TILED;
  2423. if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
  2424. dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
  2425. linear_offset = y * fb->pitches[0] + x * cpp;
  2426. intel_crtc->dspaddr_offset =
  2427. intel_compute_tile_offset(&x, &y, fb, 0,
  2428. fb->pitches[0], rotation);
  2429. linear_offset -= intel_crtc->dspaddr_offset;
  2430. if (rotation == BIT(DRM_ROTATE_180)) {
  2431. dspcntr |= DISPPLANE_ROTATE_180;
  2432. if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
  2433. x += (crtc_state->pipe_src_w - 1);
  2434. y += (crtc_state->pipe_src_h - 1);
2435. /* Find the last pixel of the last line of the display
2436. data and add it to linear_offset. */
  2437. linear_offset +=
  2438. (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
  2439. (crtc_state->pipe_src_w - 1) * cpp;
  2440. }
  2441. }
  2442. intel_crtc->adjusted_x = x;
  2443. intel_crtc->adjusted_y = y;
  2444. I915_WRITE(reg, dspcntr);
  2445. I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
  2446. I915_WRITE(DSPSURF(plane),
  2447. i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
  2448. if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  2449. I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
  2450. } else {
  2451. I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
  2452. I915_WRITE(DSPLINOFF(plane), linear_offset);
  2453. }
  2454. POSTING_READ(reg);
  2455. }
  2456. u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
  2457. uint64_t fb_modifier, uint32_t pixel_format)
  2458. {
  2459. if (fb_modifier == DRM_FORMAT_MOD_NONE) {
  2460. return 64;
  2461. } else {
  2462. int cpp = drm_format_plane_cpp(pixel_format, 0);
  2463. return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
  2464. }
  2465. }
  2466. u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
  2467. struct drm_i915_gem_object *obj,
  2468. unsigned int plane)
  2469. {
  2470. struct i915_ggtt_view view;
  2471. struct i915_vma *vma;
  2472. u64 offset;
  2473. intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
  2474. intel_plane->base.state->rotation);
  2475. vma = i915_gem_obj_to_ggtt_view(obj, &view);
  2476. if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
  2477. view.type))
  2478. return -1;
  2479. offset = vma->node.start;
  2480. if (plane == 1) {
  2481. offset += vma->ggtt_view.params.rotated.uv_start_page *
  2482. PAGE_SIZE;
  2483. }
  2484. WARN_ON(upper_32_bits(offset));
  2485. return lower_32_bits(offset);
  2486. }
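/* Disable one pipe scaler by clearing its control, window position and size registers. */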
  2487. static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
  2488. {
  2489. struct drm_device *dev = intel_crtc->base.dev;
  2490. struct drm_i915_private *dev_priv = dev->dev_private;
  2491. I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
  2492. I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
  2493. I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
  2494. }
  2495. /*
  2496. * This function detaches (aka. unbinds) unused scalers in hardware
  2497. */
  2498. static void skl_detach_scalers(struct intel_crtc *intel_crtc)
  2499. {
  2500. struct intel_crtc_scaler_state *scaler_state;
  2501. int i;
  2502. scaler_state = &intel_crtc->config->scaler_state;
  2503. /* loop through and disable scalers that aren't in use */
  2504. for (i = 0; i < intel_crtc->num_scalers; i++) {
  2505. if (!scaler_state->scalers[i].in_use)
  2506. skl_detach_scaler(intel_crtc, i);
  2507. }
  2508. }
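/* Convert a DRM fourcc into the SKL+ PLANE_CTL format/ordering/alpha bits. */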
  2509. u32 skl_plane_ctl_format(uint32_t pixel_format)
  2510. {
  2511. switch (pixel_format) {
  2512. case DRM_FORMAT_C8:
  2513. return PLANE_CTL_FORMAT_INDEXED;
  2514. case DRM_FORMAT_RGB565:
  2515. return PLANE_CTL_FORMAT_RGB_565;
  2516. case DRM_FORMAT_XBGR8888:
  2517. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
  2518. case DRM_FORMAT_XRGB8888:
  2519. return PLANE_CTL_FORMAT_XRGB_8888;
  2520. /*
2521. * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
  2522. * to be already pre-multiplied. We need to add a knob (or a different
  2523. * DRM_FORMAT) for user-space to configure that.
  2524. */
  2525. case DRM_FORMAT_ABGR8888:
  2526. return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
  2527. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2528. case DRM_FORMAT_ARGB8888:
  2529. return PLANE_CTL_FORMAT_XRGB_8888 |
  2530. PLANE_CTL_ALPHA_SW_PREMULTIPLY;
  2531. case DRM_FORMAT_XRGB2101010:
  2532. return PLANE_CTL_FORMAT_XRGB_2101010;
  2533. case DRM_FORMAT_XBGR2101010:
  2534. return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
  2535. case DRM_FORMAT_YUYV:
  2536. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
  2537. case DRM_FORMAT_YVYU:
  2538. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
  2539. case DRM_FORMAT_UYVY:
  2540. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
  2541. case DRM_FORMAT_VYUY:
  2542. return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
  2543. default:
  2544. MISSING_CASE(pixel_format);
  2545. }
  2546. return 0;
  2547. }
  2548. u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
  2549. {
  2550. switch (fb_modifier) {
  2551. case DRM_FORMAT_MOD_NONE:
  2552. break;
  2553. case I915_FORMAT_MOD_X_TILED:
  2554. return PLANE_CTL_TILED_X;
  2555. case I915_FORMAT_MOD_Y_TILED:
  2556. return PLANE_CTL_TILED_Y;
  2557. case I915_FORMAT_MOD_Yf_TILED:
  2558. return PLANE_CTL_TILED_YF;
  2559. default:
  2560. MISSING_CASE(fb_modifier);
  2561. }
  2562. return 0;
  2563. }
  2564. u32 skl_plane_ctl_rotation(unsigned int rotation)
  2565. {
  2566. switch (rotation) {
  2567. case BIT(DRM_ROTATE_0):
  2568. break;
  2569. /*
2570. * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr
2571. * while i915 HW rotation is clockwise, which is why the values are swapped here.
  2572. */
  2573. case BIT(DRM_ROTATE_90):
  2574. return PLANE_CTL_ROTATE_270;
  2575. case BIT(DRM_ROTATE_180):
  2576. return PLANE_CTL_ROTATE_180;
  2577. case BIT(DRM_ROTATE_270):
  2578. return PLANE_CTL_ROTATE_90;
  2579. default:
  2580. MISSING_CASE(rotation);
  2581. }
  2582. return 0;
  2583. }
  2584. static void skylake_update_primary_plane(struct drm_plane *plane,
  2585. const struct intel_crtc_state *crtc_state,
  2586. const struct intel_plane_state *plane_state)
  2587. {
  2588. struct drm_device *dev = plane->dev;
  2589. struct drm_i915_private *dev_priv = dev->dev_private;
  2590. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  2591. struct drm_framebuffer *fb = plane_state->base.fb;
  2592. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  2593. int pipe = intel_crtc->pipe;
  2594. u32 plane_ctl, stride_div, stride;
  2595. u32 tile_height, plane_offset, plane_size;
  2596. unsigned int rotation = plane_state->base.rotation;
  2597. int x_offset, y_offset;
  2598. u32 surf_addr;
  2599. int scaler_id = plane_state->scaler_id;
  2600. int src_x = plane_state->src.x1 >> 16;
  2601. int src_y = plane_state->src.y1 >> 16;
  2602. int src_w = drm_rect_width(&plane_state->src) >> 16;
  2603. int src_h = drm_rect_height(&plane_state->src) >> 16;
  2604. int dst_x = plane_state->dst.x1;
  2605. int dst_y = plane_state->dst.y1;
  2606. int dst_w = drm_rect_width(&plane_state->dst);
  2607. int dst_h = drm_rect_height(&plane_state->dst);
  2608. plane_ctl = PLANE_CTL_ENABLE |
  2609. PLANE_CTL_PIPE_GAMMA_ENABLE |
  2610. PLANE_CTL_PIPE_CSC_ENABLE;
  2611. plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
  2612. plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
  2613. plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
  2614. plane_ctl |= skl_plane_ctl_rotation(rotation);
  2615. stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  2616. fb->pixel_format);
  2617. surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
  2618. WARN_ON(drm_rect_width(&plane_state->src) == 0);
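/*
 * For 90/270 rotation the fb is scanned out through the rotated GGTT view:
 * PLANE_STRIDE is then expressed in tiles (the fb height in tiles), the
 * x/y offsets are translated into the rotated coordinate space, and the
 * width/height fields of PLANE_SIZE are swapped.
 */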
  2619. if (intel_rotation_90_or_270(rotation)) {
  2620. int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
  2621. /* stride = Surface height in tiles */
  2622. tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
  2623. stride = DIV_ROUND_UP(fb->height, tile_height);
  2624. x_offset = stride * tile_height - src_y - src_h;
  2625. y_offset = src_x;
  2626. plane_size = (src_w - 1) << 16 | (src_h - 1);
  2627. } else {
  2628. stride = fb->pitches[0] / stride_div;
  2629. x_offset = src_x;
  2630. y_offset = src_y;
  2631. plane_size = (src_h - 1) << 16 | (src_w - 1);
  2632. }
  2633. plane_offset = y_offset << 16 | x_offset;
  2634. intel_crtc->adjusted_x = x_offset;
  2635. intel_crtc->adjusted_y = y_offset;
  2636. I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
  2637. I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
  2638. I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
  2639. I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
  2640. if (scaler_id >= 0) {
  2641. uint32_t ps_ctrl = 0;
  2642. WARN_ON(!dst_w || !dst_h);
  2643. ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
  2644. crtc_state->scaler_state.scalers[scaler_id].mode;
  2645. I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
  2646. I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
  2647. I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
  2648. I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
  2649. I915_WRITE(PLANE_POS(pipe, 0), 0);
  2650. } else {
  2651. I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
  2652. }
  2653. I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
  2654. POSTING_READ(PLANE_SURF(pipe, 0));
  2655. }
  2656. static void skylake_disable_primary_plane(struct drm_plane *primary,
  2657. struct drm_crtc *crtc)
  2658. {
  2659. struct drm_device *dev = crtc->dev;
  2660. struct drm_i915_private *dev_priv = dev->dev_private;
  2661. int pipe = to_intel_crtc(crtc)->pipe;
  2662. I915_WRITE(PLANE_CTL(pipe, 0), 0);
  2663. I915_WRITE(PLANE_SURF(pipe, 0), 0);
  2664. POSTING_READ(PLANE_SURF(pipe, 0));
  2665. }
  2666. /* Assume fb object is pinned & idle & fenced and just update base pointers */
  2667. static int
  2668. intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
  2669. int x, int y, enum mode_set_atomic state)
  2670. {
  2671. /* Support for kgdboc is disabled, this needs a major rework. */
  2672. DRM_ERROR("legacy panic handler not supported any more.\n");
  2673. return -ENODEV;
  2674. }
  2675. static void intel_complete_page_flips(struct drm_device *dev)
  2676. {
  2677. struct drm_crtc *crtc;
  2678. for_each_crtc(dev, crtc) {
  2679. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2680. enum plane plane = intel_crtc->plane;
  2681. intel_prepare_page_flip(dev, plane);
  2682. intel_finish_page_flip_plane(dev, plane);
  2683. }
  2684. }
  2685. static void intel_update_primary_planes(struct drm_device *dev)
  2686. {
  2687. struct drm_crtc *crtc;
  2688. for_each_crtc(dev, crtc) {
  2689. struct intel_plane *plane = to_intel_plane(crtc->primary);
  2690. struct intel_plane_state *plane_state;
  2691. drm_modeset_lock_crtc(crtc, &plane->base);
  2692. plane_state = to_intel_plane_state(plane->base.state);
  2693. if (plane_state->visible)
  2694. plane->update_plane(&plane->base,
  2695. to_intel_crtc_state(crtc->state),
  2696. plane_state);
  2697. drm_modeset_unlock_crtc(crtc);
  2698. }
  2699. }
  2700. void intel_prepare_reset(struct drm_device *dev)
  2701. {
  2702. /* no reset support for gen2 */
  2703. if (IS_GEN2(dev))
  2704. return;
  2705. /* reset doesn't touch the display */
  2706. if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
  2707. return;
  2708. drm_modeset_lock_all(dev);
  2709. /*
  2710. * Disabling the crtcs gracefully seems nicer. Also the
  2711. * g33 docs say we should at least disable all the planes.
  2712. */
  2713. intel_display_suspend(dev);
  2714. }
  2715. void intel_finish_reset(struct drm_device *dev)
  2716. {
  2717. struct drm_i915_private *dev_priv = to_i915(dev);
  2718. /*
  2719. * Flips in the rings will be nuked by the reset,
  2720. * so complete all pending flips so that user space
  2721. * will get its events and not get stuck.
  2722. */
  2723. intel_complete_page_flips(dev);
  2724. /* no reset support for gen2 */
  2725. if (IS_GEN2(dev))
  2726. return;
  2727. /* reset doesn't touch the display */
  2728. if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
  2729. /*
  2730. * Flips in the rings have been nuked by the reset,
  2731. * so update the base address of all primary
2732. * planes to the last fb to make sure we're
  2733. * showing the correct fb after a reset.
  2734. *
  2735. * FIXME: Atomic will make this obsolete since we won't schedule
  2736. * CS-based flips (which might get lost in gpu resets) any more.
  2737. */
  2738. intel_update_primary_planes(dev);
  2739. return;
  2740. }
  2741. /*
  2742. * The display has been reset as well,
  2743. * so need a full re-initialization.
  2744. */
  2745. intel_runtime_pm_disable_interrupts(dev_priv);
  2746. intel_runtime_pm_enable_interrupts(dev_priv);
  2747. intel_modeset_init_hw(dev);
  2748. spin_lock_irq(&dev_priv->irq_lock);
  2749. if (dev_priv->display.hpd_irq_setup)
  2750. dev_priv->display.hpd_irq_setup(dev);
  2751. spin_unlock_irq(&dev_priv->irq_lock);
  2752. intel_display_resume(dev);
  2753. intel_hpd_init(dev_priv);
  2754. drm_modeset_unlock_all(dev);
  2755. }
  2756. static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
  2757. {
  2758. struct drm_device *dev = crtc->dev;
  2759. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2760. unsigned reset_counter;
  2761. bool pending;
  2762. reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
  2763. if (intel_crtc->reset_counter != reset_counter)
  2764. return false;
  2765. spin_lock_irq(&dev->event_lock);
  2766. pending = to_intel_crtc(crtc)->unpin_work != NULL;
  2767. spin_unlock_irq(&dev->event_lock);
  2768. return pending;
  2769. }
  2770. static void intel_update_pipe_config(struct intel_crtc *crtc,
  2771. struct intel_crtc_state *old_crtc_state)
  2772. {
  2773. struct drm_device *dev = crtc->base.dev;
  2774. struct drm_i915_private *dev_priv = dev->dev_private;
  2775. struct intel_crtc_state *pipe_config =
  2776. to_intel_crtc_state(crtc->base.state);
  2777. /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
  2778. crtc->base.mode = crtc->base.state->mode;
  2779. DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
  2780. old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
  2781. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  2782. /*
  2783. * Update pipe size and adjust fitter if needed: the reason for this is
  2784. * that in compute_mode_changes we check the native mode (not the pfit
  2785. * mode) to see if we can flip rather than do a full mode set. In the
  2786. * fastboot case, we'll flip, but if we don't update the pipesrc and
  2787. * pfit state, we'll end up with a big fb scanned out into the wrong
  2788. * sized surface.
  2789. */
  2790. I915_WRITE(PIPESRC(crtc->pipe),
  2791. ((pipe_config->pipe_src_w - 1) << 16) |
  2792. (pipe_config->pipe_src_h - 1));
  2793. /* on skylake this is done by detaching scalers */
  2794. if (INTEL_INFO(dev)->gen >= 9) {
  2795. skl_detach_scalers(crtc);
  2796. if (pipe_config->pch_pfit.enabled)
  2797. skylake_pfit_enable(crtc);
  2798. } else if (HAS_PCH_SPLIT(dev)) {
  2799. if (pipe_config->pch_pfit.enabled)
  2800. ironlake_pfit_enable(crtc);
  2801. else if (old_crtc_state->pch_pfit.enabled)
  2802. ironlake_pfit_disable(crtc, true);
  2803. }
  2804. }
  2805. static void intel_fdi_normal_train(struct drm_crtc *crtc)
  2806. {
  2807. struct drm_device *dev = crtc->dev;
  2808. struct drm_i915_private *dev_priv = dev->dev_private;
  2809. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2810. int pipe = intel_crtc->pipe;
  2811. i915_reg_t reg;
  2812. u32 temp;
  2813. /* enable normal train */
  2814. reg = FDI_TX_CTL(pipe);
  2815. temp = I915_READ(reg);
  2816. if (IS_IVYBRIDGE(dev)) {
  2817. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  2818. temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
  2819. } else {
  2820. temp &= ~FDI_LINK_TRAIN_NONE;
  2821. temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
  2822. }
  2823. I915_WRITE(reg, temp);
  2824. reg = FDI_RX_CTL(pipe);
  2825. temp = I915_READ(reg);
  2826. if (HAS_PCH_CPT(dev)) {
  2827. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  2828. temp |= FDI_LINK_TRAIN_NORMAL_CPT;
  2829. } else {
  2830. temp &= ~FDI_LINK_TRAIN_NONE;
  2831. temp |= FDI_LINK_TRAIN_NONE;
  2832. }
  2833. I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
  2834. /* wait one idle pattern time */
  2835. POSTING_READ(reg);
  2836. udelay(1000);
  2837. /* IVB wants error correction enabled */
  2838. if (IS_IVYBRIDGE(dev))
  2839. I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
  2840. FDI_FE_ERRC_ENABLE);
  2841. }
  2842. /* The FDI link training functions for ILK/Ibexpeak. */
  2843. static void ironlake_fdi_link_train(struct drm_crtc *crtc)
  2844. {
  2845. struct drm_device *dev = crtc->dev;
  2846. struct drm_i915_private *dev_priv = dev->dev_private;
  2847. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2848. int pipe = intel_crtc->pipe;
  2849. i915_reg_t reg;
  2850. u32 temp, tries;
  2851. /* FDI needs bits from pipe first */
  2852. assert_pipe_enabled(dev_priv, pipe);
2853. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2854. for the train result */
  2855. reg = FDI_RX_IMR(pipe);
  2856. temp = I915_READ(reg);
  2857. temp &= ~FDI_RX_SYMBOL_LOCK;
  2858. temp &= ~FDI_RX_BIT_LOCK;
  2859. I915_WRITE(reg, temp);
  2860. I915_READ(reg);
  2861. udelay(150);
  2862. /* enable CPU FDI TX and PCH FDI RX */
  2863. reg = FDI_TX_CTL(pipe);
  2864. temp = I915_READ(reg);
  2865. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  2866. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  2867. temp &= ~FDI_LINK_TRAIN_NONE;
  2868. temp |= FDI_LINK_TRAIN_PATTERN_1;
  2869. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  2870. reg = FDI_RX_CTL(pipe);
  2871. temp = I915_READ(reg);
  2872. temp &= ~FDI_LINK_TRAIN_NONE;
  2873. temp |= FDI_LINK_TRAIN_PATTERN_1;
  2874. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  2875. POSTING_READ(reg);
  2876. udelay(150);
2877. /* Ironlake workaround, enable clock pointer after FDI enable */
  2878. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  2879. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
  2880. FDI_RX_PHASE_SYNC_POINTER_EN);
  2881. reg = FDI_RX_IIR(pipe);
  2882. for (tries = 0; tries < 5; tries++) {
  2883. temp = I915_READ(reg);
  2884. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  2885. if ((temp & FDI_RX_BIT_LOCK)) {
  2886. DRM_DEBUG_KMS("FDI train 1 done.\n");
  2887. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  2888. break;
  2889. }
  2890. }
  2891. if (tries == 5)
  2892. DRM_ERROR("FDI train 1 fail!\n");
  2893. /* Train 2 */
  2894. reg = FDI_TX_CTL(pipe);
  2895. temp = I915_READ(reg);
  2896. temp &= ~FDI_LINK_TRAIN_NONE;
  2897. temp |= FDI_LINK_TRAIN_PATTERN_2;
  2898. I915_WRITE(reg, temp);
  2899. reg = FDI_RX_CTL(pipe);
  2900. temp = I915_READ(reg);
  2901. temp &= ~FDI_LINK_TRAIN_NONE;
  2902. temp |= FDI_LINK_TRAIN_PATTERN_2;
  2903. I915_WRITE(reg, temp);
  2904. POSTING_READ(reg);
  2905. udelay(150);
  2906. reg = FDI_RX_IIR(pipe);
  2907. for (tries = 0; tries < 5; tries++) {
  2908. temp = I915_READ(reg);
  2909. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  2910. if (temp & FDI_RX_SYMBOL_LOCK) {
  2911. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  2912. DRM_DEBUG_KMS("FDI train 2 done.\n");
  2913. break;
  2914. }
  2915. }
  2916. if (tries == 5)
  2917. DRM_ERROR("FDI train 2 fail!\n");
  2918. DRM_DEBUG_KMS("FDI train done\n");
  2919. }
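/*
 * Voltage-swing/pre-emphasis combinations tried, in order, during SNB/IVB
 * FDI link training (see the retry loops below).
 */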
  2920. static const int snb_b_fdi_train_param[] = {
  2921. FDI_LINK_TRAIN_400MV_0DB_SNB_B,
  2922. FDI_LINK_TRAIN_400MV_6DB_SNB_B,
  2923. FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
  2924. FDI_LINK_TRAIN_800MV_0DB_SNB_B,
  2925. };
  2926. /* The FDI link training functions for SNB/Cougarpoint. */
  2927. static void gen6_fdi_link_train(struct drm_crtc *crtc)
  2928. {
  2929. struct drm_device *dev = crtc->dev;
  2930. struct drm_i915_private *dev_priv = dev->dev_private;
  2931. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  2932. int pipe = intel_crtc->pipe;
  2933. i915_reg_t reg;
  2934. u32 temp, i, retry;
2935. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2936. for the train result */
  2937. reg = FDI_RX_IMR(pipe);
  2938. temp = I915_READ(reg);
  2939. temp &= ~FDI_RX_SYMBOL_LOCK;
  2940. temp &= ~FDI_RX_BIT_LOCK;
  2941. I915_WRITE(reg, temp);
  2942. POSTING_READ(reg);
  2943. udelay(150);
  2944. /* enable CPU FDI TX and PCH FDI RX */
  2945. reg = FDI_TX_CTL(pipe);
  2946. temp = I915_READ(reg);
  2947. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  2948. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  2949. temp &= ~FDI_LINK_TRAIN_NONE;
  2950. temp |= FDI_LINK_TRAIN_PATTERN_1;
  2951. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  2952. /* SNB-B */
  2953. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  2954. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  2955. I915_WRITE(FDI_RX_MISC(pipe),
  2956. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  2957. reg = FDI_RX_CTL(pipe);
  2958. temp = I915_READ(reg);
  2959. if (HAS_PCH_CPT(dev)) {
  2960. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  2961. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  2962. } else {
  2963. temp &= ~FDI_LINK_TRAIN_NONE;
  2964. temp |= FDI_LINK_TRAIN_PATTERN_1;
  2965. }
  2966. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  2967. POSTING_READ(reg);
  2968. udelay(150);
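/* Walk the four vswing/emphasis entries; for each one, poll FDI_RX_IIR up
 * to five times for bit lock before moving on to the next entry. */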
  2969. for (i = 0; i < 4; i++) {
  2970. reg = FDI_TX_CTL(pipe);
  2971. temp = I915_READ(reg);
  2972. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  2973. temp |= snb_b_fdi_train_param[i];
  2974. I915_WRITE(reg, temp);
  2975. POSTING_READ(reg);
  2976. udelay(500);
  2977. for (retry = 0; retry < 5; retry++) {
  2978. reg = FDI_RX_IIR(pipe);
  2979. temp = I915_READ(reg);
  2980. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  2981. if (temp & FDI_RX_BIT_LOCK) {
  2982. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  2983. DRM_DEBUG_KMS("FDI train 1 done.\n");
  2984. break;
  2985. }
  2986. udelay(50);
  2987. }
  2988. if (retry < 5)
  2989. break;
  2990. }
  2991. if (i == 4)
  2992. DRM_ERROR("FDI train 1 fail!\n");
  2993. /* Train 2 */
  2994. reg = FDI_TX_CTL(pipe);
  2995. temp = I915_READ(reg);
  2996. temp &= ~FDI_LINK_TRAIN_NONE;
  2997. temp |= FDI_LINK_TRAIN_PATTERN_2;
  2998. if (IS_GEN6(dev)) {
  2999. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3000. /* SNB-B */
  3001. temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
  3002. }
  3003. I915_WRITE(reg, temp);
  3004. reg = FDI_RX_CTL(pipe);
  3005. temp = I915_READ(reg);
  3006. if (HAS_PCH_CPT(dev)) {
  3007. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3008. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3009. } else {
  3010. temp &= ~FDI_LINK_TRAIN_NONE;
  3011. temp |= FDI_LINK_TRAIN_PATTERN_2;
  3012. }
  3013. I915_WRITE(reg, temp);
  3014. POSTING_READ(reg);
  3015. udelay(150);
  3016. for (i = 0; i < 4; i++) {
  3017. reg = FDI_TX_CTL(pipe);
  3018. temp = I915_READ(reg);
  3019. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3020. temp |= snb_b_fdi_train_param[i];
  3021. I915_WRITE(reg, temp);
  3022. POSTING_READ(reg);
  3023. udelay(500);
  3024. for (retry = 0; retry < 5; retry++) {
  3025. reg = FDI_RX_IIR(pipe);
  3026. temp = I915_READ(reg);
  3027. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3028. if (temp & FDI_RX_SYMBOL_LOCK) {
  3029. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3030. DRM_DEBUG_KMS("FDI train 2 done.\n");
  3031. break;
  3032. }
  3033. udelay(50);
  3034. }
  3035. if (retry < 5)
  3036. break;
  3037. }
  3038. if (i == 4)
  3039. DRM_ERROR("FDI train 2 fail!\n");
  3040. DRM_DEBUG_KMS("FDI train done.\n");
  3041. }
  3042. /* Manual link training for Ivy Bridge A0 parts */
  3043. static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
  3044. {
  3045. struct drm_device *dev = crtc->dev;
  3046. struct drm_i915_private *dev_priv = dev->dev_private;
  3047. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3048. int pipe = intel_crtc->pipe;
  3049. i915_reg_t reg;
  3050. u32 temp, i, j;
3051. /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3052. for the train result */
  3053. reg = FDI_RX_IMR(pipe);
  3054. temp = I915_READ(reg);
  3055. temp &= ~FDI_RX_SYMBOL_LOCK;
  3056. temp &= ~FDI_RX_BIT_LOCK;
  3057. I915_WRITE(reg, temp);
  3058. POSTING_READ(reg);
  3059. udelay(150);
  3060. DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
  3061. I915_READ(FDI_RX_IIR(pipe)));
  3062. /* Try each vswing and preemphasis setting twice before moving on */
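/* Each snb_b_fdi_train_param[] entry is used for two consecutive values
 * of j, hence the j / 2 indexing below. */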
  3063. for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
  3064. /* disable first in case we need to retry */
  3065. reg = FDI_TX_CTL(pipe);
  3066. temp = I915_READ(reg);
  3067. temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
  3068. temp &= ~FDI_TX_ENABLE;
  3069. I915_WRITE(reg, temp);
  3070. reg = FDI_RX_CTL(pipe);
  3071. temp = I915_READ(reg);
  3072. temp &= ~FDI_LINK_TRAIN_AUTO;
  3073. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3074. temp &= ~FDI_RX_ENABLE;
  3075. I915_WRITE(reg, temp);
  3076. /* enable CPU FDI TX and PCH FDI RX */
  3077. reg = FDI_TX_CTL(pipe);
  3078. temp = I915_READ(reg);
  3079. temp &= ~FDI_DP_PORT_WIDTH_MASK;
  3080. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
  3081. temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
  3082. temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
  3083. temp |= snb_b_fdi_train_param[j/2];
  3084. temp |= FDI_COMPOSITE_SYNC;
  3085. I915_WRITE(reg, temp | FDI_TX_ENABLE);
  3086. I915_WRITE(FDI_RX_MISC(pipe),
  3087. FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
  3088. reg = FDI_RX_CTL(pipe);
  3089. temp = I915_READ(reg);
  3090. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3091. temp |= FDI_COMPOSITE_SYNC;
  3092. I915_WRITE(reg, temp | FDI_RX_ENABLE);
  3093. POSTING_READ(reg);
  3094. udelay(1); /* should be 0.5us */
  3095. for (i = 0; i < 4; i++) {
  3096. reg = FDI_RX_IIR(pipe);
  3097. temp = I915_READ(reg);
  3098. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3099. if (temp & FDI_RX_BIT_LOCK ||
  3100. (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
  3101. I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
  3102. DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
  3103. i);
  3104. break;
  3105. }
  3106. udelay(1); /* should be 0.5us */
  3107. }
  3108. if (i == 4) {
  3109. DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
  3110. continue;
  3111. }
  3112. /* Train 2 */
  3113. reg = FDI_TX_CTL(pipe);
  3114. temp = I915_READ(reg);
  3115. temp &= ~FDI_LINK_TRAIN_NONE_IVB;
  3116. temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
  3117. I915_WRITE(reg, temp);
  3118. reg = FDI_RX_CTL(pipe);
  3119. temp = I915_READ(reg);
  3120. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3121. temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
  3122. I915_WRITE(reg, temp);
  3123. POSTING_READ(reg);
  3124. udelay(2); /* should be 1.5us */
  3125. for (i = 0; i < 4; i++) {
  3126. reg = FDI_RX_IIR(pipe);
  3127. temp = I915_READ(reg);
  3128. DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
  3129. if (temp & FDI_RX_SYMBOL_LOCK ||
  3130. (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
  3131. I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
  3132. DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
  3133. i);
  3134. goto train_done;
  3135. }
  3136. udelay(2); /* should be 1.5us */
  3137. }
  3138. if (i == 4)
  3139. DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
  3140. }
  3141. train_done:
  3142. DRM_DEBUG_KMS("FDI train done.\n");
  3143. }
  3144. static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
  3145. {
  3146. struct drm_device *dev = intel_crtc->base.dev;
  3147. struct drm_i915_private *dev_priv = dev->dev_private;
  3148. int pipe = intel_crtc->pipe;
  3149. i915_reg_t reg;
  3150. u32 temp;
  3151. /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
  3152. reg = FDI_RX_CTL(pipe);
  3153. temp = I915_READ(reg);
  3154. temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
  3155. temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
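/* BPC in FDI rx is consistent with that in PIPECONF */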
  3156. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3157. I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
  3158. POSTING_READ(reg);
  3159. udelay(200);
  3160. /* Switch from Rawclk to PCDclk */
  3161. temp = I915_READ(reg);
  3162. I915_WRITE(reg, temp | FDI_PCDCLK);
  3163. POSTING_READ(reg);
  3164. udelay(200);
  3165. /* Enable CPU FDI TX PLL, always on for Ironlake */
  3166. reg = FDI_TX_CTL(pipe);
  3167. temp = I915_READ(reg);
  3168. if ((temp & FDI_TX_PLL_ENABLE) == 0) {
  3169. I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
  3170. POSTING_READ(reg);
  3171. udelay(100);
  3172. }
  3173. }
  3174. static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
  3175. {
  3176. struct drm_device *dev = intel_crtc->base.dev;
  3177. struct drm_i915_private *dev_priv = dev->dev_private;
  3178. int pipe = intel_crtc->pipe;
  3179. i915_reg_t reg;
  3180. u32 temp;
  3181. /* Switch from PCDclk to Rawclk */
  3182. reg = FDI_RX_CTL(pipe);
  3183. temp = I915_READ(reg);
  3184. I915_WRITE(reg, temp & ~FDI_PCDCLK);
  3185. /* Disable CPU FDI TX PLL */
  3186. reg = FDI_TX_CTL(pipe);
  3187. temp = I915_READ(reg);
  3188. I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
  3189. POSTING_READ(reg);
  3190. udelay(100);
  3191. reg = FDI_RX_CTL(pipe);
  3192. temp = I915_READ(reg);
  3193. I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
  3194. /* Wait for the clocks to turn off. */
  3195. POSTING_READ(reg);
  3196. udelay(100);
  3197. }
  3198. static void ironlake_fdi_disable(struct drm_crtc *crtc)
  3199. {
  3200. struct drm_device *dev = crtc->dev;
  3201. struct drm_i915_private *dev_priv = dev->dev_private;
  3202. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3203. int pipe = intel_crtc->pipe;
  3204. i915_reg_t reg;
  3205. u32 temp;
  3206. /* disable CPU FDI tx and PCH FDI rx */
  3207. reg = FDI_TX_CTL(pipe);
  3208. temp = I915_READ(reg);
  3209. I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
  3210. POSTING_READ(reg);
  3211. reg = FDI_RX_CTL(pipe);
  3212. temp = I915_READ(reg);
  3213. temp &= ~(0x7 << 16);
  3214. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3215. I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
  3216. POSTING_READ(reg);
  3217. udelay(100);
  3218. /* Ironlake workaround, disable clock pointer after downing FDI */
  3219. if (HAS_PCH_IBX(dev))
  3220. I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
  3221. /* still set train pattern 1 */
  3222. reg = FDI_TX_CTL(pipe);
  3223. temp = I915_READ(reg);
  3224. temp &= ~FDI_LINK_TRAIN_NONE;
  3225. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3226. I915_WRITE(reg, temp);
  3227. reg = FDI_RX_CTL(pipe);
  3228. temp = I915_READ(reg);
  3229. if (HAS_PCH_CPT(dev)) {
  3230. temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
  3231. temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
  3232. } else {
  3233. temp &= ~FDI_LINK_TRAIN_NONE;
  3234. temp |= FDI_LINK_TRAIN_PATTERN_1;
  3235. }
  3236. /* BPC in FDI rx is consistent with that in PIPECONF */
  3237. temp &= ~(0x07 << 16);
  3238. temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
  3239. I915_WRITE(reg, temp);
  3240. POSTING_READ(reg);
  3241. udelay(100);
  3242. }
  3243. bool intel_has_pending_fb_unpin(struct drm_device *dev)
  3244. {
  3245. struct intel_crtc *crtc;
  3246. /* Note that we don't need to be called with mode_config.lock here
  3247. * as our list of CRTC objects is static for the lifetime of the
  3248. * device and so cannot disappear as we iterate. Similarly, we can
  3249. * happily treat the predicates as racy, atomic checks as userspace
3250. * cannot claim and pin a new fb without at least acquiring the
  3251. * struct_mutex and so serialising with us.
  3252. */
  3253. for_each_intel_crtc(dev, crtc) {
  3254. if (atomic_read(&crtc->unpin_work_count) == 0)
  3255. continue;
  3256. if (crtc->unpin_work)
  3257. intel_wait_for_vblank(dev, crtc->pipe);
  3258. return true;
  3259. }
  3260. return false;
  3261. }
  3262. static void page_flip_completed(struct intel_crtc *intel_crtc)
  3263. {
  3264. struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  3265. struct intel_unpin_work *work = intel_crtc->unpin_work;
  3266. /* ensure that the unpin work is consistent wrt ->pending. */
  3267. smp_rmb();
  3268. intel_crtc->unpin_work = NULL;
  3269. if (work->event)
  3270. drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
  3271. drm_crtc_vblank_put(&intel_crtc->base);
  3272. wake_up_all(&dev_priv->pending_flip_queue);
  3273. queue_work(dev_priv->wq, &work->work);
  3274. trace_i915_flip_complete(intel_crtc->plane,
  3275. work->pending_flip_obj);
  3276. }
  3277. static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
  3278. {
  3279. struct drm_device *dev = crtc->dev;
  3280. struct drm_i915_private *dev_priv = dev->dev_private;
  3281. long ret;
  3282. WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
  3283. ret = wait_event_interruptible_timeout(
  3284. dev_priv->pending_flip_queue,
  3285. !intel_crtc_has_pending_flip(crtc),
  3286. 60*HZ);
  3287. if (ret < 0)
  3288. return ret;
  3289. if (ret == 0) {
  3290. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3291. spin_lock_irq(&dev->event_lock);
  3292. if (intel_crtc->unpin_work) {
  3293. WARN_ONCE(1, "Removing stuck page flip\n");
  3294. page_flip_completed(intel_crtc);
  3295. }
  3296. spin_unlock_irq(&dev->event_lock);
  3297. }
  3298. return 0;
  3299. }
  3300. static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
  3301. {
  3302. u32 temp;
  3303. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
  3304. mutex_lock(&dev_priv->sb_lock);
  3305. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3306. temp |= SBI_SSCCTL_DISABLE;
  3307. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3308. mutex_unlock(&dev_priv->sb_lock);
  3309. }
  3310. /* Program iCLKIP clock to the desired frequency */
  3311. static void lpt_program_iclkip(struct drm_crtc *crtc)
  3312. {
  3313. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  3314. int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
  3315. u32 divsel, phaseinc, auxdiv, phasedir = 0;
  3316. u32 temp;
  3317. lpt_disable_iclkip(dev_priv);
  3318. /* The iCLK virtual clock root frequency is in MHz,
3319. * but the adjusted_mode->crtc_clock is in kHz. To get the
  3320. * divisors, it is necessary to divide one by another, so we
  3321. * convert the virtual clock precision to KHz here for higher
  3322. * precision.
  3323. */
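/*
 * Illustrative example: for a 108000 kHz pixel clock and auxdiv = 0,
 * desired_divisor = 172800000 / 108000 = 1600, so divsel = 1600 / 64 - 2
 * = 23 and phaseinc = 1600 % 64 = 0.
 */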
  3324. for (auxdiv = 0; auxdiv < 2; auxdiv++) {
  3325. u32 iclk_virtual_root_freq = 172800 * 1000;
  3326. u32 iclk_pi_range = 64;
  3327. u32 desired_divisor;
  3328. desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3329. clock << auxdiv);
  3330. divsel = (desired_divisor / iclk_pi_range) - 2;
  3331. phaseinc = desired_divisor % iclk_pi_range;
  3332. /*
  3333. * Near 20MHz is a corner case which is
  3334. * out of range for the 7-bit divisor
  3335. */
  3336. if (divsel <= 0x7f)
  3337. break;
  3338. }
  3339. /* This should not happen with any sane values */
  3340. WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
  3341. ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
  3342. WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
  3343. ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
  3344. DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
  3345. clock,
  3346. auxdiv,
  3347. divsel,
  3348. phasedir,
  3349. phaseinc);
  3350. mutex_lock(&dev_priv->sb_lock);
  3351. /* Program SSCDIVINTPHASE6 */
  3352. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3353. temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
  3354. temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
  3355. temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
  3356. temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
  3357. temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
  3358. temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
  3359. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
  3360. /* Program SSCAUXDIV */
  3361. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3362. temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
  3363. temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
  3364. intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
  3365. /* Enable modulator and associated divider */
  3366. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3367. temp &= ~SBI_SSCCTL_DISABLE;
  3368. intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
  3369. mutex_unlock(&dev_priv->sb_lock);
  3370. /* Wait for initialization time */
  3371. udelay(24);
  3372. I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
  3373. }
  3374. int lpt_get_iclkip(struct drm_i915_private *dev_priv)
  3375. {
  3376. u32 divsel, phaseinc, auxdiv;
  3377. u32 iclk_virtual_root_freq = 172800 * 1000;
  3378. u32 iclk_pi_range = 64;
  3379. u32 desired_divisor;
  3380. u32 temp;
  3381. if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
  3382. return 0;
  3383. mutex_lock(&dev_priv->sb_lock);
  3384. temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
  3385. if (temp & SBI_SSCCTL_DISABLE) {
  3386. mutex_unlock(&dev_priv->sb_lock);
  3387. return 0;
  3388. }
  3389. temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
  3390. divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
  3391. SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
  3392. phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
  3393. SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
  3394. temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
  3395. auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
  3396. SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
  3397. mutex_unlock(&dev_priv->sb_lock);
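/* Invert the lpt_program_iclkip() math: rebuild the divisor from
 * divsel/phaseinc and derive the pixel clock in kHz. */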
  3398. desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
  3399. return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
  3400. desired_divisor << auxdiv);
  3401. }
  3402. static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
  3403. enum pipe pch_transcoder)
  3404. {
  3405. struct drm_device *dev = crtc->base.dev;
  3406. struct drm_i915_private *dev_priv = dev->dev_private;
  3407. enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
  3408. I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
  3409. I915_READ(HTOTAL(cpu_transcoder)));
  3410. I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
  3411. I915_READ(HBLANK(cpu_transcoder)));
  3412. I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
  3413. I915_READ(HSYNC(cpu_transcoder)));
  3414. I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
  3415. I915_READ(VTOTAL(cpu_transcoder)));
  3416. I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
  3417. I915_READ(VBLANK(cpu_transcoder)));
  3418. I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
  3419. I915_READ(VSYNC(cpu_transcoder)));
  3420. I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
  3421. I915_READ(VSYNCSHIFT(cpu_transcoder)));
  3422. }
  3423. static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
  3424. {
  3425. struct drm_i915_private *dev_priv = dev->dev_private;
  3426. uint32_t temp;
  3427. temp = I915_READ(SOUTH_CHICKEN1);
  3428. if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
  3429. return;
  3430. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
  3431. WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
  3432. temp &= ~FDI_BC_BIFURCATION_SELECT;
  3433. if (enable)
  3434. temp |= FDI_BC_BIFURCATION_SELECT;
  3435. DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
  3436. I915_WRITE(SOUTH_CHICKEN1, temp);
  3437. POSTING_READ(SOUTH_CHICKEN1);
  3438. }
  3439. static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
  3440. {
  3441. struct drm_device *dev = intel_crtc->base.dev;
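/*
 * Pipe B keeps the shared B/C FDI lanes to itself when it needs more than
 * two lanes; otherwise the lanes are bifurcated so pipe C can use FDI as
 * well. Pipe C always runs off the bifurcated lanes.
 */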
  3442. switch (intel_crtc->pipe) {
  3443. case PIPE_A:
  3444. break;
  3445. case PIPE_B:
  3446. if (intel_crtc->config->fdi_lanes > 2)
  3447. cpt_set_fdi_bc_bifurcation(dev, false);
  3448. else
  3449. cpt_set_fdi_bc_bifurcation(dev, true);
  3450. break;
  3451. case PIPE_C:
  3452. cpt_set_fdi_bc_bifurcation(dev, true);
  3453. break;
  3454. default:
  3455. BUG();
  3456. }
  3457. }
  3458. /* Return which DP Port should be selected for Transcoder DP control */
  3459. static enum port
  3460. intel_trans_dp_port_sel(struct drm_crtc *crtc)
  3461. {
  3462. struct drm_device *dev = crtc->dev;
  3463. struct intel_encoder *encoder;
  3464. for_each_encoder_on_crtc(dev, crtc, encoder) {
  3465. if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
  3466. encoder->type == INTEL_OUTPUT_EDP)
  3467. return enc_to_dig_port(&encoder->base)->port;
  3468. }
  3469. return -1;
  3470. }
  3471. /*
  3472. * Enable PCH resources required for PCH ports:
  3473. * - PCH PLLs
  3474. * - FDI training & RX/TX
  3475. * - update transcoder timings
  3476. * - DP transcoding bits
  3477. * - transcoder
  3478. */
  3479. static void ironlake_pch_enable(struct drm_crtc *crtc)
  3480. {
  3481. struct drm_device *dev = crtc->dev;
  3482. struct drm_i915_private *dev_priv = dev->dev_private;
  3483. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3484. int pipe = intel_crtc->pipe;
  3485. u32 temp;
  3486. assert_pch_transcoder_disabled(dev_priv, pipe);
  3487. if (IS_IVYBRIDGE(dev))
  3488. ivybridge_update_fdi_bc_bifurcation(intel_crtc);
  3489. /* Write the TU size bits before fdi link training, so that error
  3490. * detection works. */
  3491. I915_WRITE(FDI_RX_TUSIZE1(pipe),
  3492. I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
  3493. /* For PCH output, training FDI link */
  3494. dev_priv->display.fdi_link_train(crtc);
  3495. /* We need to program the right clock selection before writing the pixel
3496. * multiplier into the DPLL. */
  3497. if (HAS_PCH_CPT(dev)) {
  3498. u32 sel;
  3499. temp = I915_READ(PCH_DPLL_SEL);
  3500. temp |= TRANS_DPLL_ENABLE(pipe);
  3501. sel = TRANS_DPLLB_SEL(pipe);
  3502. if (intel_crtc->config->shared_dpll ==
  3503. intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
  3504. temp |= sel;
  3505. else
  3506. temp &= ~sel;
  3507. I915_WRITE(PCH_DPLL_SEL, temp);
  3508. }
3509. /* XXX: PCH PLLs can be enabled any time before we enable the PCH
  3510. * transcoder, and we actually should do this to not upset any PCH
3511. * transcoder that already uses the clock when we share it.
  3512. *
  3513. * Note that enable_shared_dpll tries to do the right thing, but
  3514. * get_shared_dpll unconditionally resets the pll - we need that to have
  3515. * the right LVDS enable sequence. */
  3516. intel_enable_shared_dpll(intel_crtc);
  3517. /* set transcoder timing, panel must allow it */
  3518. assert_panel_unlocked(dev_priv, pipe);
  3519. ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
  3520. intel_fdi_normal_train(crtc);
  3521. /* For PCH DP, enable TRANS_DP_CTL */
  3522. if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
  3523. const struct drm_display_mode *adjusted_mode =
  3524. &intel_crtc->config->base.adjusted_mode;
  3525. u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
  3526. i915_reg_t reg = TRANS_DP_CTL(pipe);
  3527. temp = I915_READ(reg);
  3528. temp &= ~(TRANS_DP_PORT_SEL_MASK |
  3529. TRANS_DP_SYNC_MASK |
  3530. TRANS_DP_BPC_MASK);
  3531. temp |= TRANS_DP_OUTPUT_ENABLE;
  3532. temp |= bpc << 9; /* same format but at 11:9 */
  3533. if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
  3534. temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
  3535. if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
  3536. temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
  3537. switch (intel_trans_dp_port_sel(crtc)) {
  3538. case PORT_B:
  3539. temp |= TRANS_DP_PORT_SEL_B;
  3540. break;
  3541. case PORT_C:
  3542. temp |= TRANS_DP_PORT_SEL_C;
  3543. break;
  3544. case PORT_D:
  3545. temp |= TRANS_DP_PORT_SEL_D;
  3546. break;
  3547. default:
  3548. BUG();
  3549. }
  3550. I915_WRITE(reg, temp);
  3551. }
  3552. ironlake_enable_pch_transcoder(dev_priv, pipe);
  3553. }
  3554. static void lpt_pch_enable(struct drm_crtc *crtc)
  3555. {
  3556. struct drm_device *dev = crtc->dev;
  3557. struct drm_i915_private *dev_priv = dev->dev_private;
  3558. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3559. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  3560. assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
  3561. lpt_program_iclkip(crtc);
  3562. /* Set transcoder timing. */
  3563. ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
  3564. lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
  3565. }
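/*
 * Sanity check after a CPT mode set: the scanline counter (PIPEDSL) should
 * keep advancing once the pipe is running.
 */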
  3566. static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  3567. {
  3568. struct drm_i915_private *dev_priv = dev->dev_private;
  3569. i915_reg_t dslreg = PIPEDSL(pipe);
  3570. u32 temp;
  3571. temp = I915_READ(dslreg);
  3572. udelay(500);
  3573. if (wait_for(I915_READ(dslreg) != temp, 5)) {
  3574. if (wait_for(I915_READ(dslreg) != temp, 5))
  3575. DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
  3576. }
  3577. }
  3578. static int
  3579. skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
  3580. unsigned scaler_user, int *scaler_id, unsigned int rotation,
  3581. int src_w, int src_h, int dst_w, int dst_h)
  3582. {
  3583. struct intel_crtc_scaler_state *scaler_state =
  3584. &crtc_state->scaler_state;
  3585. struct intel_crtc *intel_crtc =
  3586. to_intel_crtc(crtc_state->base.crtc);
  3587. int need_scaling;
  3588. need_scaling = intel_rotation_90_or_270(rotation) ?
  3589. (src_h != dst_w || src_w != dst_h):
  3590. (src_w != dst_w || src_h != dst_h);
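/*
 * With 90/270 rotation the source is scanned out with width and height
 * swapped, so the swapped source dimensions are compared against the
 * destination to decide whether scaling is needed.
 */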
  3591. /*
3592. * If the plane is being disabled, the scaler is no longer required, or force detach:
3593. * - free the scaler bound to this plane/crtc
3594. * - in order to do this, update crtc->scaler_usage
3595. *
3596. * Here the scaler state in crtc_state is set free so that the
3597. * scaler can be assigned to another user. The actual register
  3598. * update to free the scaler is done in plane/panel-fit programming.
  3599. * For this purpose crtc/plane_state->scaler_id isn't reset here.
  3600. */
  3601. if (force_detach || !need_scaling) {
  3602. if (*scaler_id >= 0) {
  3603. scaler_state->scaler_users &= ~(1 << scaler_user);
  3604. scaler_state->scalers[*scaler_id].in_use = 0;
  3605. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  3606. "Staged freeing scaler id %d scaler_users = 0x%x\n",
  3607. intel_crtc->pipe, scaler_user, *scaler_id,
  3608. scaler_state->scaler_users);
  3609. *scaler_id = -1;
  3610. }
  3611. return 0;
  3612. }
  3613. /* range checks */
  3614. if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
  3615. dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
  3616. src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
  3617. dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
  3618. DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
  3619. "size is out of scaler range\n",
  3620. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
  3621. return -EINVAL;
  3622. }
  3623. /* mark this plane as a scaler user in crtc_state */
  3624. scaler_state->scaler_users |= (1 << scaler_user);
  3625. DRM_DEBUG_KMS("scaler_user index %u.%u: "
  3626. "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
  3627. intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
  3628. scaler_state->scaler_users);
  3629. return 0;
  3630. }
  3631. /**
  3632. * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
  3633. *
  3634. * @state: crtc's scaler state
  3635. *
  3636. * Return
  3637. * 0 - scaler_usage updated successfully
  3638. * error - requested scaling cannot be supported or other error condition
  3639. */
  3640. int skl_update_scaler_crtc(struct intel_crtc_state *state)
  3641. {
  3642. struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
  3643. const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
  3644. DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
  3645. intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
  3646. return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
  3647. &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
  3648. state->pipe_src_w, state->pipe_src_h,
  3649. adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
  3650. }
  3651. /**
  3652. * skl_update_scaler_plane - Stages update to scaler state for a given plane.
  3653. *
3654. * @crtc_state: crtc state containing the scaler state
  3655. * @plane_state: atomic plane state to update
  3656. *
  3657. * Return
  3658. * 0 - scaler_usage updated successfully
  3659. * error - requested scaling cannot be supported or other error condition
  3660. */
  3661. static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
  3662. struct intel_plane_state *plane_state)
  3663. {
  3664. struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
  3665. struct intel_plane *intel_plane =
  3666. to_intel_plane(plane_state->base.plane);
  3667. struct drm_framebuffer *fb = plane_state->base.fb;
  3668. int ret;
  3669. bool force_detach = !fb || !plane_state->visible;
  3670. DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
  3671. intel_plane->base.base.id, intel_crtc->pipe,
  3672. drm_plane_index(&intel_plane->base));
  3673. ret = skl_update_scaler(crtc_state, force_detach,
  3674. drm_plane_index(&intel_plane->base),
  3675. &plane_state->scaler_id,
  3676. plane_state->base.rotation,
  3677. drm_rect_width(&plane_state->src) >> 16,
  3678. drm_rect_height(&plane_state->src) >> 16,
  3679. drm_rect_width(&plane_state->dst),
  3680. drm_rect_height(&plane_state->dst));
  3681. if (ret || plane_state->scaler_id < 0)
  3682. return ret;
  3683. /* check colorkey */
  3684. if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
  3685. DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
  3686. intel_plane->base.base.id);
  3687. return -EINVAL;
  3688. }
  3689. /* Check src format */
  3690. switch (fb->pixel_format) {
  3691. case DRM_FORMAT_RGB565:
  3692. case DRM_FORMAT_XBGR8888:
  3693. case DRM_FORMAT_XRGB8888:
  3694. case DRM_FORMAT_ABGR8888:
  3695. case DRM_FORMAT_ARGB8888:
  3696. case DRM_FORMAT_XRGB2101010:
  3697. case DRM_FORMAT_XBGR2101010:
  3698. case DRM_FORMAT_YUYV:
  3699. case DRM_FORMAT_YVYU:
  3700. case DRM_FORMAT_UYVY:
  3701. case DRM_FORMAT_VYUY:
  3702. break;
  3703. default:
  3704. DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
  3705. intel_plane->base.base.id, fb->base.id, fb->pixel_format);
  3706. return -EINVAL;
  3707. }
  3708. return 0;
  3709. }
  3710. static void skylake_scaler_disable(struct intel_crtc *crtc)
  3711. {
  3712. int i;
  3713. for (i = 0; i < crtc->num_scalers; i++)
  3714. skl_detach_scaler(crtc, i);
  3715. }
  3716. static void skylake_pfit_enable(struct intel_crtc *crtc)
  3717. {
  3718. struct drm_device *dev = crtc->base.dev;
  3719. struct drm_i915_private *dev_priv = dev->dev_private;
  3720. int pipe = crtc->pipe;
  3721. struct intel_crtc_scaler_state *scaler_state =
  3722. &crtc->config->scaler_state;
  3723. DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
  3724. if (crtc->config->pch_pfit.enabled) {
  3725. int id;
  3726. if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
  3727. DRM_ERROR("Requesting pfit without getting a scaler first\n");
  3728. return;
  3729. }
  3730. id = scaler_state->scaler_id;
  3731. I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
  3732. PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
  3733. I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
  3734. I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
  3735. DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
  3736. }
  3737. }
  3738. static void ironlake_pfit_enable(struct intel_crtc *crtc)
  3739. {
  3740. struct drm_device *dev = crtc->base.dev;
  3741. struct drm_i915_private *dev_priv = dev->dev_private;
  3742. int pipe = crtc->pipe;
  3743. if (crtc->config->pch_pfit.enabled) {
  3744. /* Force use of hard-coded filter coefficients
  3745. * as some pre-programmed values are broken,
  3746. * e.g. x201.
  3747. */
  3748. if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
  3749. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
  3750. PF_PIPE_SEL_IVB(pipe));
  3751. else
  3752. I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
  3753. I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
  3754. I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
  3755. }
  3756. }
  3757. void hsw_enable_ips(struct intel_crtc *crtc)
  3758. {
  3759. struct drm_device *dev = crtc->base.dev;
  3760. struct drm_i915_private *dev_priv = dev->dev_private;
  3761. if (!crtc->config->ips_enabled)
  3762. return;
  3763. /*
3764. * We can only enable IPS after we enable a plane and wait for a vblank.
  3765. * This function is called from post_plane_update, which is run after
  3766. * a vblank wait.
  3767. */
  3768. assert_plane_enabled(dev_priv, crtc->plane);
  3769. if (IS_BROADWELL(dev)) {
  3770. mutex_lock(&dev_priv->rps.hw_lock);
  3771. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
  3772. mutex_unlock(&dev_priv->rps.hw_lock);
  3773. /* Quoting Art Runyan: "its not safe to expect any particular
  3774. * value in IPS_CTL bit 31 after enabling IPS through the
  3775. * mailbox." Moreover, the mailbox may return a bogus state,
  3776. * so we need to just enable it and continue on.
  3777. */
  3778. } else {
  3779. I915_WRITE(IPS_CTL, IPS_ENABLE);
  3780. /* The bit only becomes 1 in the next vblank, so this wait here
  3781. * is essentially intel_wait_for_vblank. If we don't have this
  3782. * and don't wait for vblanks until the end of crtc_enable, then
  3783. * the HW state readout code will complain that the expected
  3784. * IPS_CTL value is not the one we read. */
  3785. if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
  3786. DRM_ERROR("Timed out waiting for IPS enable\n");
  3787. }
  3788. }
  3789. void hsw_disable_ips(struct intel_crtc *crtc)
  3790. {
  3791. struct drm_device *dev = crtc->base.dev;
  3792. struct drm_i915_private *dev_priv = dev->dev_private;
  3793. if (!crtc->config->ips_enabled)
  3794. return;
  3795. assert_plane_enabled(dev_priv, crtc->plane);
  3796. if (IS_BROADWELL(dev)) {
  3797. mutex_lock(&dev_priv->rps.hw_lock);
  3798. WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
  3799. mutex_unlock(&dev_priv->rps.hw_lock);
  3800. /* wait for pcode to finish disabling IPS, which may take up to 42ms */
  3801. if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
  3802. DRM_ERROR("Timed out waiting for IPS disable\n");
  3803. } else {
  3804. I915_WRITE(IPS_CTL, 0);
  3805. POSTING_READ(IPS_CTL);
  3806. }
  3807. /* We need to wait for a vblank before we can disable the plane. */
  3808. intel_wait_for_vblank(dev, crtc->pipe);
  3809. }
  3810. static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
  3811. {
  3812. if (intel_crtc->overlay) {
  3813. struct drm_device *dev = intel_crtc->base.dev;
  3814. struct drm_i915_private *dev_priv = dev->dev_private;
  3815. mutex_lock(&dev->struct_mutex);
  3816. dev_priv->mm.interruptible = false;
  3817. (void) intel_overlay_switch_off(intel_crtc->overlay);
  3818. dev_priv->mm.interruptible = true;
  3819. mutex_unlock(&dev->struct_mutex);
  3820. }
  3821. /* Let userspace switch the overlay on again. In most cases userspace
  3822. * has to recompute where to put it anyway.
  3823. */
  3824. }
  3825. /**
  3826. * intel_post_enable_primary - Perform operations after enabling primary plane
  3827. * @crtc: the CRTC whose primary plane was just enabled
  3828. *
  3829. * Performs potentially sleeping operations that must be done after the primary
  3830. * plane is enabled, such as updating FBC and IPS. Note that this may be
  3831. * called due to an explicit primary plane update, or due to an implicit
  3832. * re-enable that is caused when a sprite plane is updated to no longer
  3833. * completely hide the primary plane.
  3834. */
  3835. static void
  3836. intel_post_enable_primary(struct drm_crtc *crtc)
  3837. {
  3838. struct drm_device *dev = crtc->dev;
  3839. struct drm_i915_private *dev_priv = dev->dev_private;
  3840. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3841. int pipe = intel_crtc->pipe;
  3842. /*
  3843. * FIXME IPS should be fine as long as one plane is
  3844. * enabled, but in practice it seems to have problems
  3845. * when going from primary only to sprite only and vice
  3846. * versa.
  3847. */
  3848. hsw_enable_ips(intel_crtc);
  3849. /*
  3850. * Gen2 reports pipe underruns whenever all planes are disabled.
  3851. * So don't enable underrun reporting before at least some planes
  3852. * are enabled.
  3853. * FIXME: Need to fix the logic to work when we turn off all planes
  3854. * but leave the pipe running.
  3855. */
  3856. if (IS_GEN2(dev))
  3857. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  3858. /* Underruns don't always raise interrupts, so check manually. */
  3859. intel_check_cpu_fifo_underruns(dev_priv);
  3860. intel_check_pch_fifo_underruns(dev_priv);
  3861. }
  3862. /* FIXME move all this to pre_plane_update() with proper state tracking */
  3863. static void
  3864. intel_pre_disable_primary(struct drm_crtc *crtc)
  3865. {
  3866. struct drm_device *dev = crtc->dev;
  3867. struct drm_i915_private *dev_priv = dev->dev_private;
  3868. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3869. int pipe = intel_crtc->pipe;
  3870. /*
  3871. * Gen2 reports pipe underruns whenever all planes are disabled.
3872. * So disable underrun reporting before all the planes get disabled.
  3873. * FIXME: Need to fix the logic to work when we turn off all planes
  3874. * but leave the pipe running.
  3875. */
  3876. if (IS_GEN2(dev))
  3877. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  3878. /*
  3879. * FIXME IPS should be fine as long as one plane is
  3880. * enabled, but in practice it seems to have problems
  3881. * when going from primary only to sprite only and vice
  3882. * versa.
  3883. */
  3884. hsw_disable_ips(intel_crtc);
  3885. }
  3886. /* FIXME get rid of this and use pre_plane_update */
  3887. static void
  3888. intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
  3889. {
  3890. struct drm_device *dev = crtc->dev;
  3891. struct drm_i915_private *dev_priv = dev->dev_private;
  3892. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  3893. int pipe = intel_crtc->pipe;
  3894. intel_pre_disable_primary(crtc);
  3895. /*
  3896. * Vblank time updates from the shadow to live plane control register
  3897. * are blocked if the memory self-refresh mode is active at that
  3898. * moment. So to make sure the plane gets truly disabled, disable
  3899. * first the self-refresh mode. The self-refresh enable bit in turn
  3900. * will be checked/applied by the HW only at the next frame start
  3901. * event which is after the vblank start event, so we need to have a
  3902. * wait-for-vblank between disabling the plane and the pipe.
  3903. */
  3904. if (HAS_GMCH_DISPLAY(dev)) {
  3905. intel_set_memory_cxsr(dev_priv, false);
  3906. dev_priv->wm.vlv.cxsr = false;
  3907. intel_wait_for_vblank(dev, pipe);
  3908. }
  3909. }
  3910. static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
  3911. {
  3912. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  3913. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  3914. struct intel_crtc_state *pipe_config =
  3915. to_intel_crtc_state(crtc->base.state);
  3916. struct drm_device *dev = crtc->base.dev;
  3917. struct drm_plane *primary = crtc->base.primary;
  3918. struct drm_plane_state *old_pri_state =
  3919. drm_atomic_get_existing_plane_state(old_state, primary);
  3920. intel_frontbuffer_flip(dev, pipe_config->fb_bits);
  3921. crtc->wm.cxsr_allowed = true;
  3922. if (pipe_config->update_wm_post && pipe_config->base.active)
  3923. intel_update_watermarks(&crtc->base);
  3924. if (old_pri_state) {
  3925. struct intel_plane_state *primary_state =
  3926. to_intel_plane_state(primary->state);
  3927. struct intel_plane_state *old_primary_state =
  3928. to_intel_plane_state(old_pri_state);
  3929. intel_fbc_post_update(crtc);
  3930. if (primary_state->visible &&
  3931. (needs_modeset(&pipe_config->base) ||
  3932. !old_primary_state->visible))
  3933. intel_post_enable_primary(&crtc->base);
  3934. }
  3935. }
  3936. static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
  3937. {
  3938. struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
  3939. struct drm_device *dev = crtc->base.dev;
  3940. struct drm_i915_private *dev_priv = dev->dev_private;
  3941. struct intel_crtc_state *pipe_config =
  3942. to_intel_crtc_state(crtc->base.state);
  3943. struct drm_atomic_state *old_state = old_crtc_state->base.state;
  3944. struct drm_plane *primary = crtc->base.primary;
  3945. struct drm_plane_state *old_pri_state =
  3946. drm_atomic_get_existing_plane_state(old_state, primary);
  3947. bool modeset = needs_modeset(&pipe_config->base);
  3948. if (old_pri_state) {
  3949. struct intel_plane_state *primary_state =
  3950. to_intel_plane_state(primary->state);
  3951. struct intel_plane_state *old_primary_state =
  3952. to_intel_plane_state(old_pri_state);
  3953. intel_fbc_pre_update(crtc);
  3954. if (old_primary_state->visible &&
  3955. (modeset || !primary_state->visible))
  3956. intel_pre_disable_primary(&crtc->base);
  3957. }
  3958. if (pipe_config->disable_cxsr) {
  3959. crtc->wm.cxsr_allowed = false;
  3960. /*
  3961. * Vblank time updates from the shadow to live plane control register
  3962. * are blocked if the memory self-refresh mode is active at that
3963. * moment. So to make sure the plane gets truly disabled, disable
3964. * the self-refresh mode first. The self-refresh enable bit in turn
  3965. * will be checked/applied by the HW only at the next frame start
  3966. * event which is after the vblank start event, so we need to have a
  3967. * wait-for-vblank between disabling the plane and the pipe.
  3968. */
  3969. if (old_crtc_state->base.active) {
  3970. intel_set_memory_cxsr(dev_priv, false);
  3971. dev_priv->wm.vlv.cxsr = false;
  3972. intel_wait_for_vblank(dev, crtc->pipe);
  3973. }
  3974. }
  3975. /*
  3976. * IVB workaround: must disable low power watermarks for at least
  3977. * one frame before enabling scaling. LP watermarks can be re-enabled
  3978. * when scaling is disabled.
  3979. *
  3980. * WaCxSRDisabledForSpriteScaling:ivb
  3981. */
  3982. if (pipe_config->disable_lp_wm) {
  3983. ilk_disable_lp_wm(dev);
  3984. intel_wait_for_vblank(dev, crtc->pipe);
  3985. }
  3986. /*
  3987. * If we're doing a modeset, we're done. No need to do any pre-vblank
  3988. * watermark programming here.
  3989. */
  3990. if (needs_modeset(&pipe_config->base))
  3991. return;
  3992. /*
  3993. * For platforms that support atomic watermarks, program the
  3994. * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
  3995. * will be the intermediate values that are safe for both pre- and
  3996. * post- vblank; when vblank happens, the 'active' values will be set
  3997. * to the final 'target' values and we'll do this again to get the
  3998. * optimal watermarks. For gen9+ platforms, the values we program here
  3999. * will be the final target values which will get automatically latched
  4000. * at vblank time; no further programming will be necessary.
  4001. *
  4002. * If a platform hasn't been transitioned to atomic watermarks yet,
  4003. * we'll continue to update watermarks the old way, if flags tell
  4004. * us to.
  4005. */
  4006. if (dev_priv->display.initial_watermarks != NULL)
  4007. dev_priv->display.initial_watermarks(pipe_config);
  4008. else if (pipe_config->update_wm_pre)
  4009. intel_update_watermarks(&crtc->base);
  4010. }
  4011. static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
  4012. {
  4013. struct drm_device *dev = crtc->dev;
  4014. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4015. struct drm_plane *p;
  4016. int pipe = intel_crtc->pipe;
  4017. intel_crtc_dpms_overlay_disable(intel_crtc);
  4018. drm_for_each_plane_mask(p, dev, plane_mask)
  4019. to_intel_plane(p)->disable_plane(p, crtc);
  4020. /*
  4021. * FIXME: Once we grow proper nuclear flip support out of this we need
  4022. * to compute the mask of flip planes precisely. For the time being
  4023. * consider this a flip to a NULL plane.
  4024. */
  4025. intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
  4026. }
  4027. static void ironlake_crtc_enable(struct drm_crtc *crtc)
  4028. {
  4029. struct drm_device *dev = crtc->dev;
  4030. struct drm_i915_private *dev_priv = dev->dev_private;
  4031. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4032. struct intel_encoder *encoder;
  4033. int pipe = intel_crtc->pipe;
  4034. struct intel_crtc_state *pipe_config =
  4035. to_intel_crtc_state(crtc->state);
  4036. if (WARN_ON(intel_crtc->active))
  4037. return;
  4038. /*
  4039. * Sometimes spurious CPU pipe underruns happen during FDI
  4040. * training, at least with VGA+HDMI cloning. Suppress them.
  4041. *
4042. * On ILK we occasionally get spurious CPU pipe underruns
  4043. * between eDP port A enable and vdd enable. Also PCH port
  4044. * enable seems to result in the occasional CPU pipe underrun.
  4045. *
  4046. * Spurious PCH underruns also occur during PCH enabling.
  4047. */
  4048. if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
  4049. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4050. if (intel_crtc->config->has_pch_encoder)
  4051. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4052. if (intel_crtc->config->has_pch_encoder)
  4053. intel_prepare_shared_dpll(intel_crtc);
  4054. if (intel_crtc->config->has_dp_encoder)
  4055. intel_dp_set_m_n(intel_crtc, M1_N1);
  4056. intel_set_pipe_timings(intel_crtc);
  4057. intel_set_pipe_src_size(intel_crtc);
  4058. if (intel_crtc->config->has_pch_encoder) {
  4059. intel_cpu_transcoder_set_m_n(intel_crtc,
  4060. &intel_crtc->config->fdi_m_n, NULL);
  4061. }
  4062. ironlake_set_pipeconf(crtc);
  4063. intel_crtc->active = true;
  4064. for_each_encoder_on_crtc(dev, crtc, encoder)
  4065. if (encoder->pre_enable)
  4066. encoder->pre_enable(encoder);
  4067. if (intel_crtc->config->has_pch_encoder) {
  4068. /* Note: FDI PLL enabling _must_ be done before we enable the
  4069. * cpu pipes, hence this is separate from all the other fdi/pch
  4070. * enabling. */
  4071. ironlake_fdi_pll_enable(intel_crtc);
  4072. } else {
  4073. assert_fdi_tx_disabled(dev_priv, pipe);
  4074. assert_fdi_rx_disabled(dev_priv, pipe);
  4075. }
  4076. ironlake_pfit_enable(intel_crtc);
  4077. /*
4078. * On ILK+ the LUT must be loaded before the pipe is running, but with
  4079. * clocks enabled
  4080. */
  4081. intel_color_load_luts(&pipe_config->base);
  4082. if (dev_priv->display.initial_watermarks != NULL)
  4083. dev_priv->display.initial_watermarks(intel_crtc->config);
  4084. intel_enable_pipe(intel_crtc);
  4085. if (intel_crtc->config->has_pch_encoder)
  4086. ironlake_pch_enable(crtc);
  4087. assert_vblank_disabled(crtc);
  4088. drm_crtc_vblank_on(crtc);
  4089. for_each_encoder_on_crtc(dev, crtc, encoder)
  4090. encoder->enable(encoder);
  4091. if (HAS_PCH_CPT(dev))
  4092. cpt_verify_modeset(dev, intel_crtc->pipe);
  4093. /* Must wait for vblank to avoid spurious PCH FIFO underruns */
  4094. if (intel_crtc->config->has_pch_encoder)
  4095. intel_wait_for_vblank(dev, pipe);
  4096. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4097. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4098. }
  4099. /* IPS only exists on ULT machines and is tied to pipe A. */
  4100. static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
  4101. {
  4102. return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
  4103. }
  4104. static void haswell_crtc_enable(struct drm_crtc *crtc)
  4105. {
  4106. struct drm_device *dev = crtc->dev;
  4107. struct drm_i915_private *dev_priv = dev->dev_private;
  4108. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4109. struct intel_encoder *encoder;
  4110. int pipe = intel_crtc->pipe, hsw_workaround_pipe;
  4111. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4112. struct intel_crtc_state *pipe_config =
  4113. to_intel_crtc_state(crtc->state);
  4114. if (WARN_ON(intel_crtc->active))
  4115. return;
  4116. if (intel_crtc->config->has_pch_encoder)
  4117. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4118. false);
  4119. if (intel_crtc->config->shared_dpll)
  4120. intel_enable_shared_dpll(intel_crtc);
  4121. if (intel_crtc->config->has_dp_encoder)
  4122. intel_dp_set_m_n(intel_crtc, M1_N1);
  4123. if (!intel_crtc->config->has_dsi_encoder)
  4124. intel_set_pipe_timings(intel_crtc);
  4125. intel_set_pipe_src_size(intel_crtc);
  4126. if (cpu_transcoder != TRANSCODER_EDP &&
  4127. !transcoder_is_dsi(cpu_transcoder)) {
  4128. I915_WRITE(PIPE_MULT(cpu_transcoder),
  4129. intel_crtc->config->pixel_multiplier - 1);
  4130. }
  4131. if (intel_crtc->config->has_pch_encoder) {
  4132. intel_cpu_transcoder_set_m_n(intel_crtc,
  4133. &intel_crtc->config->fdi_m_n, NULL);
  4134. }
  4135. if (!intel_crtc->config->has_dsi_encoder)
  4136. haswell_set_pipeconf(crtc);
  4137. haswell_set_pipemisc(crtc);
  4138. intel_color_set_csc(&pipe_config->base);
  4139. intel_crtc->active = true;
  4140. if (intel_crtc->config->has_pch_encoder)
  4141. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4142. else
  4143. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4144. for_each_encoder_on_crtc(dev, crtc, encoder) {
  4145. if (encoder->pre_enable)
  4146. encoder->pre_enable(encoder);
  4147. }
  4148. if (intel_crtc->config->has_pch_encoder)
  4149. dev_priv->display.fdi_link_train(crtc);
  4150. if (!intel_crtc->config->has_dsi_encoder)
  4151. intel_ddi_enable_pipe_clock(intel_crtc);
  4152. if (INTEL_INFO(dev)->gen >= 9)
  4153. skylake_pfit_enable(intel_crtc);
  4154. else
  4155. ironlake_pfit_enable(intel_crtc);
  4156. /*
4157. * On ILK+ the LUT must be loaded before the pipe is running, but with
  4158. * clocks enabled
  4159. */
  4160. intel_color_load_luts(&pipe_config->base);
  4161. intel_ddi_set_pipe_settings(crtc);
  4162. if (!intel_crtc->config->has_dsi_encoder)
  4163. intel_ddi_enable_transcoder_func(crtc);
  4164. if (dev_priv->display.initial_watermarks != NULL)
  4165. dev_priv->display.initial_watermarks(pipe_config);
  4166. else
  4167. intel_update_watermarks(crtc);
  4168. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4169. if (!intel_crtc->config->has_dsi_encoder)
  4170. intel_enable_pipe(intel_crtc);
  4171. if (intel_crtc->config->has_pch_encoder)
  4172. lpt_pch_enable(crtc);
  4173. if (intel_crtc->config->dp_encoder_is_mst)
  4174. intel_ddi_set_vc_payload_alloc(crtc, true);
  4175. assert_vblank_disabled(crtc);
  4176. drm_crtc_vblank_on(crtc);
  4177. for_each_encoder_on_crtc(dev, crtc, encoder) {
  4178. encoder->enable(encoder);
  4179. intel_opregion_notify_encoder(encoder, true);
  4180. }
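/*
 * Presumably analogous to the ILK path above: wait a couple of frames for
 * the pipe/PCH to settle before re-enabling FIFO underrun reporting, so
 * that startup glitches don't get logged as underruns.
 */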
  4181. if (intel_crtc->config->has_pch_encoder) {
  4182. intel_wait_for_vblank(dev, pipe);
  4183. intel_wait_for_vblank(dev, pipe);
  4184. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4185. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4186. true);
  4187. }
  4188. /* If we change the relative order between pipe/planes enabling, we need
  4189. * to change the workaround. */
  4190. hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
  4191. if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
  4192. intel_wait_for_vblank(dev, hsw_workaround_pipe);
  4193. intel_wait_for_vblank(dev, hsw_workaround_pipe);
  4194. }
  4195. }
  4196. static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
  4197. {
  4198. struct drm_device *dev = crtc->base.dev;
  4199. struct drm_i915_private *dev_priv = dev->dev_private;
  4200. int pipe = crtc->pipe;
4201. /* To avoid upsetting the power well on Haswell, only disable the pfit if
  4202. * it's in use. The hw state code will make sure we get this right. */
  4203. if (force || crtc->config->pch_pfit.enabled) {
  4204. I915_WRITE(PF_CTL(pipe), 0);
  4205. I915_WRITE(PF_WIN_POS(pipe), 0);
  4206. I915_WRITE(PF_WIN_SZ(pipe), 0);
  4207. }
  4208. }
  4209. static void ironlake_crtc_disable(struct drm_crtc *crtc)
  4210. {
  4211. struct drm_device *dev = crtc->dev;
  4212. struct drm_i915_private *dev_priv = dev->dev_private;
  4213. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4214. struct intel_encoder *encoder;
  4215. int pipe = intel_crtc->pipe;
  4216. /*
  4217. * Sometimes spurious CPU pipe underruns happen when the
  4218. * pipe is already disabled, but FDI RX/TX is still enabled.
  4219. * Happens at least with VGA+HDMI cloning. Suppress them.
  4220. */
  4221. if (intel_crtc->config->has_pch_encoder) {
  4222. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  4223. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  4224. }
  4225. for_each_encoder_on_crtc(dev, crtc, encoder)
  4226. encoder->disable(encoder);
  4227. drm_crtc_vblank_off(crtc);
  4228. assert_vblank_disabled(crtc);
  4229. intel_disable_pipe(intel_crtc);
  4230. ironlake_pfit_disable(intel_crtc, false);
  4231. if (intel_crtc->config->has_pch_encoder)
  4232. ironlake_fdi_disable(crtc);
  4233. for_each_encoder_on_crtc(dev, crtc, encoder)
  4234. if (encoder->post_disable)
  4235. encoder->post_disable(encoder);
  4236. if (intel_crtc->config->has_pch_encoder) {
  4237. ironlake_disable_pch_transcoder(dev_priv, pipe);
  4238. if (HAS_PCH_CPT(dev)) {
  4239. i915_reg_t reg;
  4240. u32 temp;
  4241. /* disable TRANS_DP_CTL */
  4242. reg = TRANS_DP_CTL(pipe);
  4243. temp = I915_READ(reg);
  4244. temp &= ~(TRANS_DP_OUTPUT_ENABLE |
  4245. TRANS_DP_PORT_SEL_MASK);
  4246. temp |= TRANS_DP_PORT_SEL_NONE;
  4247. I915_WRITE(reg, temp);
  4248. /* disable DPLL_SEL */
  4249. temp = I915_READ(PCH_DPLL_SEL);
  4250. temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
  4251. I915_WRITE(PCH_DPLL_SEL, temp);
  4252. }
  4253. ironlake_fdi_pll_disable(intel_crtc);
  4254. }
  4255. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  4256. intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
  4257. }
  4258. static void haswell_crtc_disable(struct drm_crtc *crtc)
  4259. {
  4260. struct drm_device *dev = crtc->dev;
  4261. struct drm_i915_private *dev_priv = dev->dev_private;
  4262. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4263. struct intel_encoder *encoder;
  4264. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  4265. if (intel_crtc->config->has_pch_encoder)
  4266. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4267. false);
  4268. for_each_encoder_on_crtc(dev, crtc, encoder) {
  4269. intel_opregion_notify_encoder(encoder, false);
  4270. encoder->disable(encoder);
  4271. }
  4272. drm_crtc_vblank_off(crtc);
  4273. assert_vblank_disabled(crtc);
  4274. /* XXX: Do the pipe assertions at the right place for BXT DSI. */
  4275. if (!intel_crtc->config->has_dsi_encoder)
  4276. intel_disable_pipe(intel_crtc);
  4277. if (intel_crtc->config->dp_encoder_is_mst)
  4278. intel_ddi_set_vc_payload_alloc(crtc, false);
  4279. if (!intel_crtc->config->has_dsi_encoder)
  4280. intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
  4281. if (INTEL_INFO(dev)->gen >= 9)
  4282. skylake_scaler_disable(intel_crtc);
  4283. else
  4284. ironlake_pfit_disable(intel_crtc, false);
  4285. if (!intel_crtc->config->has_dsi_encoder)
  4286. intel_ddi_disable_pipe_clock(intel_crtc);
  4287. for_each_encoder_on_crtc(dev, crtc, encoder)
  4288. if (encoder->post_disable)
  4289. encoder->post_disable(encoder);
  4290. if (intel_crtc->config->has_pch_encoder) {
  4291. lpt_disable_pch_transcoder(dev_priv);
  4292. lpt_disable_iclkip(dev_priv);
  4293. intel_ddi_fdi_disable(crtc);
  4294. intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
  4295. true);
  4296. }
  4297. }
  4298. static void i9xx_pfit_enable(struct intel_crtc *crtc)
  4299. {
  4300. struct drm_device *dev = crtc->base.dev;
  4301. struct drm_i915_private *dev_priv = dev->dev_private;
  4302. struct intel_crtc_state *pipe_config = crtc->config;
  4303. if (!pipe_config->gmch_pfit.control)
  4304. return;
  4305. /*
  4306. * The panel fitter should only be adjusted whilst the pipe is disabled,
  4307. * according to register description and PRM.
  4308. */
  4309. WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
  4310. assert_pipe_disabled(dev_priv, crtc->pipe);
  4311. I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
  4312. I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
  4313. /* Border color in case we don't scale up to the full screen. Black by
  4314. * default, change to something else for debugging. */
  4315. I915_WRITE(BCLRPAT(crtc->pipe), 0);
  4316. }
  4317. static enum intel_display_power_domain port_to_power_domain(enum port port)
  4318. {
  4319. switch (port) {
  4320. case PORT_A:
  4321. return POWER_DOMAIN_PORT_DDI_A_LANES;
  4322. case PORT_B:
  4323. return POWER_DOMAIN_PORT_DDI_B_LANES;
  4324. case PORT_C:
  4325. return POWER_DOMAIN_PORT_DDI_C_LANES;
  4326. case PORT_D:
  4327. return POWER_DOMAIN_PORT_DDI_D_LANES;
  4328. case PORT_E:
  4329. return POWER_DOMAIN_PORT_DDI_E_LANES;
  4330. default:
  4331. MISSING_CASE(port);
  4332. return POWER_DOMAIN_PORT_OTHER;
  4333. }
  4334. }
  4335. static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
  4336. {
  4337. switch (port) {
  4338. case PORT_A:
  4339. return POWER_DOMAIN_AUX_A;
  4340. case PORT_B:
  4341. return POWER_DOMAIN_AUX_B;
  4342. case PORT_C:
  4343. return POWER_DOMAIN_AUX_C;
  4344. case PORT_D:
  4345. return POWER_DOMAIN_AUX_D;
  4346. case PORT_E:
  4347. /* FIXME: Check VBT for actual wiring of PORT E */
  4348. return POWER_DOMAIN_AUX_D;
  4349. default:
  4350. MISSING_CASE(port);
  4351. return POWER_DOMAIN_AUX_A;
  4352. }
  4353. }
  4354. enum intel_display_power_domain
  4355. intel_display_port_power_domain(struct intel_encoder *intel_encoder)
  4356. {
  4357. struct drm_device *dev = intel_encoder->base.dev;
  4358. struct intel_digital_port *intel_dig_port;
  4359. switch (intel_encoder->type) {
  4360. case INTEL_OUTPUT_UNKNOWN:
  4361. /* Only DDI platforms should ever use this output type */
  4362. WARN_ON_ONCE(!HAS_DDI(dev));
  4363. case INTEL_OUTPUT_DISPLAYPORT:
  4364. case INTEL_OUTPUT_HDMI:
  4365. case INTEL_OUTPUT_EDP:
  4366. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4367. return port_to_power_domain(intel_dig_port->port);
  4368. case INTEL_OUTPUT_DP_MST:
  4369. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4370. return port_to_power_domain(intel_dig_port->port);
  4371. case INTEL_OUTPUT_ANALOG:
  4372. return POWER_DOMAIN_PORT_CRT;
  4373. case INTEL_OUTPUT_DSI:
  4374. return POWER_DOMAIN_PORT_DSI;
  4375. default:
  4376. return POWER_DOMAIN_PORT_OTHER;
  4377. }
  4378. }
  4379. enum intel_display_power_domain
  4380. intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
  4381. {
  4382. struct drm_device *dev = intel_encoder->base.dev;
  4383. struct intel_digital_port *intel_dig_port;
  4384. switch (intel_encoder->type) {
  4385. case INTEL_OUTPUT_UNKNOWN:
  4386. case INTEL_OUTPUT_HDMI:
  4387. /*
  4388. * Only DDI platforms should ever use these output types.
  4389. * We can get here after the HDMI detect code has already set
  4390. * the type of the shared encoder. Since we can't be sure
  4391. * what's the status of the given connectors, play safe and
  4392. * run the DP detection too.
  4393. */
  4394. WARN_ON_ONCE(!HAS_DDI(dev));
  4395. case INTEL_OUTPUT_DISPLAYPORT:
  4396. case INTEL_OUTPUT_EDP:
  4397. intel_dig_port = enc_to_dig_port(&intel_encoder->base);
  4398. return port_to_aux_power_domain(intel_dig_port->port);
  4399. case INTEL_OUTPUT_DP_MST:
  4400. intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
  4401. return port_to_aux_power_domain(intel_dig_port->port);
  4402. default:
  4403. MISSING_CASE(intel_encoder->type);
  4404. return POWER_DOMAIN_AUX_A;
  4405. }
  4406. }
  4407. static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
  4408. struct intel_crtc_state *crtc_state)
  4409. {
  4410. struct drm_device *dev = crtc->dev;
  4411. struct drm_encoder *encoder;
  4412. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4413. enum pipe pipe = intel_crtc->pipe;
  4414. unsigned long mask;
  4415. enum transcoder transcoder = crtc_state->cpu_transcoder;
  4416. if (!crtc_state->base.active)
  4417. return 0;
  4418. mask = BIT(POWER_DOMAIN_PIPE(pipe));
  4419. mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
  4420. if (crtc_state->pch_pfit.enabled ||
  4421. crtc_state->pch_pfit.force_thru)
  4422. mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
  4423. drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
  4424. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  4425. mask |= BIT(intel_display_port_power_domain(intel_encoder));
  4426. }
  4427. if (crtc_state->shared_dpll)
  4428. mask |= BIT(POWER_DOMAIN_PLLS);
  4429. return mask;
  4430. }
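/*
 * Grab references on the power domains the new CRTC state needs and hand
 * back the set of domains that are no longer required; the caller drops
 * those via modeset_put_power_domains() below, so domains common to the
 * old and new state never lose their reference in between.
 */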
  4431. static unsigned long
  4432. modeset_get_crtc_power_domains(struct drm_crtc *crtc,
  4433. struct intel_crtc_state *crtc_state)
  4434. {
  4435. struct drm_i915_private *dev_priv = crtc->dev->dev_private;
  4436. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  4437. enum intel_display_power_domain domain;
  4438. unsigned long domains, new_domains, old_domains;
  4439. old_domains = intel_crtc->enabled_power_domains;
  4440. intel_crtc->enabled_power_domains = new_domains =
  4441. get_crtc_power_domains(crtc, crtc_state);
  4442. domains = new_domains & ~old_domains;
  4443. for_each_power_domain(domain, domains)
  4444. intel_display_power_get(dev_priv, domain);
  4445. return old_domains & ~new_domains;
  4446. }
  4447. static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
  4448. unsigned long domains)
  4449. {
  4450. enum intel_display_power_domain domain;
  4451. for_each_power_domain(domain, domains)
  4452. intel_display_power_put(dev_priv, domain);
  4453. }
  4454. static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
  4455. {
  4456. int max_cdclk_freq = dev_priv->max_cdclk_freq;
  4457. if (INTEL_INFO(dev_priv)->gen >= 9 ||
  4458. IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  4459. return max_cdclk_freq;
  4460. else if (IS_CHERRYVIEW(dev_priv))
  4461. return max_cdclk_freq*95/100;
  4462. else if (INTEL_INFO(dev_priv)->gen < 4)
  4463. return 2*max_cdclk_freq*90/100;
  4464. else
  4465. return max_cdclk_freq*90/100;
  4466. }
  4467. static void intel_update_max_cdclk(struct drm_device *dev)
  4468. {
  4469. struct drm_i915_private *dev_priv = dev->dev_private;
  4470. if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
  4471. u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
  4472. if (limit == SKL_DFSM_CDCLK_LIMIT_675)
  4473. dev_priv->max_cdclk_freq = 675000;
  4474. else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
  4475. dev_priv->max_cdclk_freq = 540000;
  4476. else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
  4477. dev_priv->max_cdclk_freq = 450000;
  4478. else
  4479. dev_priv->max_cdclk_freq = 337500;
  4480. } else if (IS_BROXTON(dev)) {
  4481. dev_priv->max_cdclk_freq = 624000;
  4482. } else if (IS_BROADWELL(dev)) {
  4483. /*
  4484. * FIXME with extra cooling we can allow
4485. * 540 MHz for ULX and 675 MHz for ULT.
4486. * How can we know if extra cooling is
4487. * available? PCI ID, VBT, something else?
  4488. */
  4489. if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  4490. dev_priv->max_cdclk_freq = 450000;
  4491. else if (IS_BDW_ULX(dev))
  4492. dev_priv->max_cdclk_freq = 450000;
  4493. else if (IS_BDW_ULT(dev))
  4494. dev_priv->max_cdclk_freq = 540000;
  4495. else
  4496. dev_priv->max_cdclk_freq = 675000;
  4497. } else if (IS_CHERRYVIEW(dev)) {
  4498. dev_priv->max_cdclk_freq = 320000;
  4499. } else if (IS_VALLEYVIEW(dev)) {
  4500. dev_priv->max_cdclk_freq = 400000;
  4501. } else {
  4502. /* otherwise assume cdclk is fixed */
  4503. dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
  4504. }
  4505. dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
  4506. DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
  4507. dev_priv->max_cdclk_freq);
  4508. DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
  4509. dev_priv->max_dotclk_freq);
  4510. }
  4511. static void intel_update_cdclk(struct drm_device *dev)
  4512. {
  4513. struct drm_i915_private *dev_priv = dev->dev_private;
  4514. dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
  4515. DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
  4516. dev_priv->cdclk_freq);
  4517. /*
  4518. * Program the gmbus_freq based on the cdclk frequency.
  4519. * BSpec erroneously claims we should aim for 4MHz, but
  4520. * in fact 1MHz is the correct frequency.
  4521. */
  4522. if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  4528. I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
  4529. }
  4530. if (dev_priv->max_cdclk_freq == 0)
  4531. intel_update_max_cdclk(dev);
  4532. }
  4533. static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
  4534. {
  4535. uint32_t divider;
  4536. uint32_t ratio;
  4537. uint32_t current_freq;
  4538. int ret;
  4539. /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
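/*
 * Worked example: ratio 60 gives 19.2 * 60 / 2 = 576 MHz before the
 * divider, so the /1, /1.5, /2 and /4 selections below yield 576, 384,
 * 288 and 144 MHz; 624 MHz instead uses ratio 65 (19.2 * 65 / 2 = 624).
 */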
  4540. switch (frequency) {
  4541. case 144000:
  4542. divider = BXT_CDCLK_CD2X_DIV_SEL_4;
  4543. ratio = BXT_DE_PLL_RATIO(60);
  4544. break;
  4545. case 288000:
  4546. divider = BXT_CDCLK_CD2X_DIV_SEL_2;
  4547. ratio = BXT_DE_PLL_RATIO(60);
  4548. break;
  4549. case 384000:
  4550. divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
  4551. ratio = BXT_DE_PLL_RATIO(60);
  4552. break;
  4553. case 576000:
  4554. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  4555. ratio = BXT_DE_PLL_RATIO(60);
  4556. break;
  4557. case 624000:
  4558. divider = BXT_CDCLK_CD2X_DIV_SEL_1;
  4559. ratio = BXT_DE_PLL_RATIO(65);
  4560. break;
  4561. case 19200:
  4562. /*
  4563. * Bypass frequency with DE PLL disabled. Init ratio, divider
  4564. * to suppress GCC warning.
  4565. */
  4566. ratio = 0;
  4567. divider = 0;
  4568. break;
  4569. default:
  4570. DRM_ERROR("unsupported CDCLK freq %d", frequency);
  4571. return;
  4572. }
  4573. mutex_lock(&dev_priv->rps.hw_lock);
  4574. /* Inform power controller of upcoming frequency change */
  4575. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  4576. 0x80000000);
  4577. mutex_unlock(&dev_priv->rps.hw_lock);
  4578. if (ret) {
  4579. DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
  4580. ret, frequency);
  4581. return;
  4582. }
  4583. current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
  4584. /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
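/* e.g. a decimal field value of 1246 (0x4de) decodes to 1246 * 500 + 1000 = 624000 kHz */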
  4585. current_freq = current_freq * 500 + 1000;
  4586. /*
  4587. * DE PLL has to be disabled when
  4588. * - setting to 19.2MHz (bypass, PLL isn't used)
  4589. * - before setting to 624MHz (PLL needs toggling)
4590. * - before switching away from 624MHz to any other frequency (PLL needs toggling)
  4591. */
  4592. if (frequency == 19200 || frequency == 624000 ||
  4593. current_freq == 624000) {
  4594. I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
  4595. /* Timeout 200us */
  4596. if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
  4597. 1))
  4598. DRM_ERROR("timout waiting for DE PLL unlock\n");
  4599. }
  4600. if (frequency != 19200) {
  4601. uint32_t val;
  4602. val = I915_READ(BXT_DE_PLL_CTL);
  4603. val &= ~BXT_DE_PLL_RATIO_MASK;
  4604. val |= ratio;
  4605. I915_WRITE(BXT_DE_PLL_CTL, val);
  4606. I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
  4607. /* Timeout 200us */
  4608. if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
  4609. DRM_ERROR("timeout waiting for DE PLL lock\n");
  4610. val = I915_READ(CDCLK_CTL);
  4611. val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
  4612. val |= divider;
  4613. /*
  4614. * Disable SSA Precharge when CD clock frequency < 500 MHz,
  4615. * enable otherwise.
  4616. */
  4617. val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  4618. if (frequency >= 500000)
  4619. val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
  4620. val &= ~CDCLK_FREQ_DECIMAL_MASK;
  4621. /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
  4622. val |= (frequency - 1000) / 500;
  4623. I915_WRITE(CDCLK_CTL, val);
  4624. }
  4625. mutex_lock(&dev_priv->rps.hw_lock);
  4626. ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
  4627. DIV_ROUND_UP(frequency, 25000));
  4628. mutex_unlock(&dev_priv->rps.hw_lock);
  4629. if (ret) {
  4630. DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
  4631. ret, frequency);
  4632. return;
  4633. }
  4634. intel_update_cdclk(dev_priv->dev);
  4635. }
  4636. static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
  4637. {
  4638. if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
  4639. return false;
  4640. /* TODO: Check for a valid CDCLK rate */
  4641. if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
  4642. DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
  4643. return false;
  4644. }
  4645. if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
  4646. DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
  4647. return false;
  4648. }
  4649. return true;
  4650. }
  4651. bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
  4652. {
  4653. return broxton_cdclk_is_enabled(dev_priv);
  4654. }
  4655. void broxton_init_cdclk(struct drm_i915_private *dev_priv)
  4656. {
  4657. /* check if cd clock is enabled */
  4658. if (broxton_cdclk_is_enabled(dev_priv)) {
  4659. DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
  4660. return;
  4661. }
  4662. DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
  4663. /*
  4664. * FIXME:
  4665. * - The initial CDCLK needs to be read from VBT.
4666. * Need to make this change once the VBT changes for BXT are in place.
  4667. * - check if setting the max (or any) cdclk freq is really necessary
  4668. * here, it belongs to modeset time
  4669. */
  4670. broxton_set_cdclk(dev_priv, 624000);
  4671. I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
  4672. POSTING_READ(DBUF_CTL);
  4673. udelay(10);
  4674. if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
  4675. DRM_ERROR("DBuf power enable timeout!\n");
  4676. }
  4677. void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
  4678. {
  4679. I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
  4680. POSTING_READ(DBUF_CTL);
  4681. udelay(10);
  4682. if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
  4683. DRM_ERROR("DBuf power disable timeout!\n");
  4684. /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
  4685. broxton_set_cdclk(dev_priv, 19200);
  4686. }
  4687. static const struct skl_cdclk_entry {
  4688. unsigned int freq;
  4689. unsigned int vco;
  4690. } skl_cdclk_frequencies[] = {
  4691. { .freq = 308570, .vco = 8640 },
  4692. { .freq = 337500, .vco = 8100 },
  4693. { .freq = 432000, .vco = 8640 },
  4694. { .freq = 450000, .vco = 8100 },
  4695. { .freq = 540000, .vco = 8100 },
  4696. { .freq = 617140, .vco = 8640 },
  4697. { .freq = 675000, .vco = 8100 },
  4698. };
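/*
 * skl_cdclk_decimal() computes the CDCLK_CTL decimal field: the frequency
 * in 0.5 MHz units with a -1 MHz offset, e.g. 337500 kHz ->
 * (337500 - 1000) / 500 = 673.
 */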
  4699. static unsigned int skl_cdclk_decimal(unsigned int freq)
  4700. {
  4701. return (freq - 1000) / 500;
  4702. }
  4703. static unsigned int skl_cdclk_get_vco(unsigned int freq)
  4704. {
  4705. unsigned int i;
  4706. for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
  4707. const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
  4708. if (e->freq == freq)
  4709. return e->vco;
  4710. }
  4711. return 8100;
  4712. }
  4713. static void
  4714. skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
  4715. {
  4716. unsigned int min_freq;
  4717. u32 val;
4718. /* select the minimum CDCLK before enabling DPLL 0 */
4722. if (required_vco == 8640)
4723. min_freq = 308570;
4724. else
4725. min_freq = 337500;
4726. val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
4727. I915_WRITE(CDCLK_CTL, val);
  4727. I915_WRITE(CDCLK_CTL, val);
  4728. POSTING_READ(CDCLK_CTL);
  4729. /*
  4730. * We always enable DPLL0 with the lowest link rate possible, but still
  4731. * taking into account the VCO required to operate the eDP panel at the
  4732. * desired frequency. The usual DP link rates operate with a VCO of
  4733. * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
  4734. * The modeset code is responsible for the selection of the exact link
  4735. * rate later on, with the constraint of choosing a frequency that
  4736. * works with required_vco.
  4737. */
  4738. val = I915_READ(DPLL_CTRL1);
  4739. val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
  4740. DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
  4741. val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
  4742. if (required_vco == 8640)
  4743. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
  4744. SKL_DPLL0);
  4745. else
  4746. val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
  4747. SKL_DPLL0);
  4748. I915_WRITE(DPLL_CTRL1, val);
  4749. POSTING_READ(DPLL_CTRL1);
  4750. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
  4751. if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
  4752. DRM_ERROR("DPLL0 not locked\n");
  4753. }
  4754. static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
  4755. {
  4756. int ret;
  4757. u32 val;
  4758. /* inform PCU we want to change CDCLK */
  4759. val = SKL_CDCLK_PREPARE_FOR_CHANGE;
  4760. mutex_lock(&dev_priv->rps.hw_lock);
  4761. ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
  4762. mutex_unlock(&dev_priv->rps.hw_lock);
  4763. return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
  4764. }
  4765. static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
  4766. {
  4767. unsigned int i;
  4768. for (i = 0; i < 15; i++) {
  4769. if (skl_cdclk_pcu_ready(dev_priv))
  4770. return true;
  4771. udelay(10);
  4772. }
  4773. return false;
  4774. }
  4775. static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
  4776. {
  4777. struct drm_device *dev = dev_priv->dev;
  4778. u32 freq_select, pcu_ack;
  4779. DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
  4780. if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
  4781. DRM_ERROR("failed to inform PCU about cdclk change\n");
  4782. return;
  4783. }
  4784. /* set CDCLK_CTL */
4785. switch (freq) {
  4786. case 450000:
  4787. case 432000:
  4788. freq_select = CDCLK_FREQ_450_432;
  4789. pcu_ack = 1;
  4790. break;
  4791. case 540000:
  4792. freq_select = CDCLK_FREQ_540;
  4793. pcu_ack = 2;
  4794. break;
  4795. case 308570:
  4796. case 337500:
  4797. default:
  4798. freq_select = CDCLK_FREQ_337_308;
  4799. pcu_ack = 0;
  4800. break;
  4801. case 617140:
  4802. case 675000:
  4803. freq_select = CDCLK_FREQ_675_617;
  4804. pcu_ack = 3;
  4805. break;
  4806. }
  4807. I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
  4808. POSTING_READ(CDCLK_CTL);
  4809. /* inform PCU of the change */
  4810. mutex_lock(&dev_priv->rps.hw_lock);
  4811. sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
  4812. mutex_unlock(&dev_priv->rps.hw_lock);
  4813. intel_update_cdclk(dev);
  4814. }
  4815. void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
  4816. {
  4817. /* disable DBUF power */
  4818. I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
  4819. POSTING_READ(DBUF_CTL);
  4820. udelay(10);
  4821. if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
  4822. DRM_ERROR("DBuf power disable timeout\n");
  4823. /* disable DPLL0 */
  4824. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
  4825. if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
  4826. DRM_ERROR("Couldn't disable DPLL0\n");
  4827. }
  4828. void skl_init_cdclk(struct drm_i915_private *dev_priv)
  4829. {
  4830. unsigned int required_vco;
  4831. /* DPLL0 not enabled (happens on early BIOS versions) */
  4832. if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
  4833. /* enable DPLL0 */
  4834. required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
  4835. skl_dpll0_enable(dev_priv, required_vco);
  4836. }
  4837. /* set CDCLK to the frequency the BIOS chose */
  4838. skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
  4839. /* enable DBUF power */
  4840. I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
  4841. POSTING_READ(DBUF_CTL);
  4842. udelay(10);
  4843. if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
  4844. DRM_ERROR("DBuf power enable timeout\n");
  4845. }
  4846. int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
  4847. {
  4848. uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
  4849. uint32_t cdctl = I915_READ(CDCLK_CTL);
  4850. int freq = dev_priv->skl_boot_cdclk;
  4851. /*
4852. * Check if the pre-OS initialized the display.
4853. * There is a SWF18 scratchpad register defined which is set by the
4854. * pre-OS and which the OS drivers can use to check the status.
  4855. */
  4856. if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
  4857. goto sanitize;
  4858. /* Is PLL enabled and locked ? */
  4859. if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
  4860. goto sanitize;
  4861. /* DPLL okay; verify the cdclock
  4862. *
4863. * In some instances the frequency selection is correct but the
4864. * decimal part is programmed wrong by the BIOS when the pre-OS does
4865. * not enable the display. Verify that as well.
  4866. */
  4867. if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
  4868. /* All well; nothing to sanitize */
  4869. return false;
  4870. sanitize:
  4871. /*
4872. * For now initialize with the max cdclk until
4873. * we get dynamic cdclk support.
4874. */
  4875. dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
  4876. skl_init_cdclk(dev_priv);
  4877. /* we did have to sanitize */
  4878. return true;
  4879. }
  4880. /* Adjust CDclk dividers to allow high res or save power if possible */
  4881. static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
  4882. {
  4883. struct drm_i915_private *dev_priv = dev->dev_private;
  4884. u32 val, cmd;
  4885. WARN_ON(dev_priv->display.get_display_clock_speed(dev)
  4886. != dev_priv->cdclk_freq);
  4887. if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
  4888. cmd = 2;
  4889. else if (cdclk == 266667)
  4890. cmd = 1;
  4891. else
  4892. cmd = 0;
  4893. mutex_lock(&dev_priv->rps.hw_lock);
  4894. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  4895. val &= ~DSPFREQGUAR_MASK;
  4896. val |= (cmd << DSPFREQGUAR_SHIFT);
  4897. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  4898. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  4899. DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
  4900. 50)) {
  4901. DRM_ERROR("timed out waiting for CDclk change\n");
  4902. }
  4903. mutex_unlock(&dev_priv->rps.hw_lock);
  4904. mutex_lock(&dev_priv->sb_lock);
  4905. if (cdclk == 400000) {
  4906. u32 divider;
  4907. divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  4908. /* adjust cdclk divider */
  4909. val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
  4910. val &= ~CCK_FREQUENCY_VALUES;
  4911. val |= divider;
  4912. vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
  4913. if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
  4914. CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
  4915. 50))
  4916. DRM_ERROR("timed out waiting for CDclk change\n");
  4917. }
  4918. /* adjust self-refresh exit latency value */
  4919. val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
  4920. val &= ~0x7f;
  4921. /*
  4922. * For high bandwidth configs, we set a higher latency in the bunit
  4923. * so that the core display fetch happens in time to avoid underruns.
  4924. */
  4925. if (cdclk == 400000)
  4926. val |= 4500 / 250; /* 4.5 usec */
  4927. else
  4928. val |= 3000 / 250; /* 3.0 usec */
  4929. vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
  4930. mutex_unlock(&dev_priv->sb_lock);
  4931. intel_update_cdclk(dev);
  4932. }
  4933. static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
  4934. {
  4935. struct drm_i915_private *dev_priv = dev->dev_private;
  4936. u32 val, cmd;
  4937. WARN_ON(dev_priv->display.get_display_clock_speed(dev)
  4938. != dev_priv->cdclk_freq);
  4939. switch (cdclk) {
  4940. case 333333:
  4941. case 320000:
  4942. case 266667:
  4943. case 200000:
  4944. break;
  4945. default:
  4946. MISSING_CASE(cdclk);
  4947. return;
  4948. }
  4949. /*
  4950. * Specs are full of misinformation, but testing on actual
  4951. * hardware has shown that we just need to write the desired
  4952. * CCK divider into the Punit register.
  4953. */
  4954. cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
  4955. mutex_lock(&dev_priv->rps.hw_lock);
  4956. val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
  4957. val &= ~DSPFREQGUAR_MASK_CHV;
  4958. val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
  4959. vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
  4960. if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
  4961. DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
  4962. 50)) {
  4963. DRM_ERROR("timed out waiting for CDclk change\n");
  4964. }
  4965. mutex_unlock(&dev_priv->rps.hw_lock);
  4966. intel_update_cdclk(dev);
  4967. }
  4968. static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
  4969. int max_pixclk)
  4970. {
  4971. int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
  4972. int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
  4973. /*
  4974. * Really only a few cases to deal with, as only 4 CDclks are supported:
  4975. * 200MHz
  4976. * 267MHz
  4977. * 320/333MHz (depends on HPLL freq)
  4978. * 400MHz (VLV only)
  4979. * So we check to see whether we're above 90% (VLV) or 95% (CHV)
  4980. * of the lower bin and adjust if needed.
  4981. *
  4982. * We seem to get an unstable or solid color picture at 200MHz.
  4983. * Not sure what's wrong. For now use 200MHz only when all pipes
  4984. * are off.
  4985. */
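/*
 * Example: on VLV with a 320 MHz bin, any pixel clock above
 * 320000 * 90 / 100 = 288000 kHz bumps us up to the 400 MHz cdclk.
 */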
  4986. if (!IS_CHERRYVIEW(dev_priv) &&
  4987. max_pixclk > freq_320*limit/100)
  4988. return 400000;
  4989. else if (max_pixclk > 266667*limit/100)
  4990. return freq_320;
  4991. else if (max_pixclk > 0)
  4992. return 266667;
  4993. else
  4994. return 200000;
  4995. }
  4996. static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
  4997. int max_pixclk)
  4998. {
  4999. /*
  5000. * FIXME:
  5001. * - remove the guardband, it's not needed on BXT
  5002. * - set 19.2MHz bypass frequency if there are no active pipes
  5003. */
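/* e.g. anything above 576000 * 9 / 10 = 518400 kHz needs the full 624 MHz */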
  5004. if (max_pixclk > 576000*9/10)
  5005. return 624000;
  5006. else if (max_pixclk > 384000*9/10)
  5007. return 576000;
  5008. else if (max_pixclk > 288000*9/10)
  5009. return 384000;
  5010. else if (max_pixclk > 144000*9/10)
  5011. return 288000;
  5012. else
  5013. return 144000;
  5014. }
5015. /* Compute the max pixel clock for the new configuration. */
  5016. static int intel_mode_max_pixclk(struct drm_device *dev,
  5017. struct drm_atomic_state *state)
  5018. {
  5019. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  5020. struct drm_i915_private *dev_priv = dev->dev_private;
  5021. struct drm_crtc *crtc;
  5022. struct drm_crtc_state *crtc_state;
  5023. unsigned max_pixclk = 0, i;
  5024. enum pipe pipe;
  5025. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  5026. sizeof(intel_state->min_pixclk));
  5027. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  5028. int pixclk = 0;
  5029. if (crtc_state->enable)
  5030. pixclk = crtc_state->adjusted_mode.crtc_clock;
  5031. intel_state->min_pixclk[i] = pixclk;
  5032. }
  5033. for_each_pipe(dev_priv, pipe)
  5034. max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
  5035. return max_pixclk;
  5036. }
  5037. static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
  5038. {
  5039. struct drm_device *dev = state->dev;
  5040. struct drm_i915_private *dev_priv = dev->dev_private;
  5041. int max_pixclk = intel_mode_max_pixclk(dev, state);
  5042. struct intel_atomic_state *intel_state =
  5043. to_intel_atomic_state(state);
  5044. if (max_pixclk < 0)
  5045. return max_pixclk;
  5046. intel_state->cdclk = intel_state->dev_cdclk =
  5047. valleyview_calc_cdclk(dev_priv, max_pixclk);
  5048. if (!intel_state->active_crtcs)
  5049. intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
  5050. return 0;
  5051. }
  5052. static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
  5053. {
  5054. struct drm_device *dev = state->dev;
  5055. struct drm_i915_private *dev_priv = dev->dev_private;
  5056. int max_pixclk = intel_mode_max_pixclk(dev, state);
  5057. struct intel_atomic_state *intel_state =
  5058. to_intel_atomic_state(state);
  5059. if (max_pixclk < 0)
  5060. return max_pixclk;
  5061. intel_state->cdclk = intel_state->dev_cdclk =
  5062. broxton_calc_cdclk(dev_priv, max_pixclk);
  5063. if (!intel_state->active_crtcs)
  5064. intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
  5065. return 0;
  5066. }
  5067. static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
  5068. {
  5069. unsigned int credits, default_credits;
  5070. if (IS_CHERRYVIEW(dev_priv))
  5071. default_credits = PFI_CREDIT(12);
  5072. else
  5073. default_credits = PFI_CREDIT(8);
  5074. if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
  5075. /* CHV suggested value is 31 or 63 */
  5076. if (IS_CHERRYVIEW(dev_priv))
  5077. credits = PFI_CREDIT_63;
  5078. else
  5079. credits = PFI_CREDIT(15);
  5080. } else {
  5081. credits = default_credits;
  5082. }
  5083. /*
  5084. * WA - write default credits before re-programming
  5085. * FIXME: should we also set the resend bit here?
  5086. */
  5087. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5088. default_credits);
  5089. I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
  5090. credits | PFI_CREDIT_RESEND);
  5091. /*
  5092. * FIXME is this guaranteed to clear
  5093. * immediately or should we poll for it?
  5094. */
  5095. WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
  5096. }
  5097. static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  5098. {
  5099. struct drm_device *dev = old_state->dev;
  5100. struct drm_i915_private *dev_priv = dev->dev_private;
  5101. struct intel_atomic_state *old_intel_state =
  5102. to_intel_atomic_state(old_state);
  5103. unsigned req_cdclk = old_intel_state->dev_cdclk;
  5104. /*
  5105. * FIXME: We can end up here with all power domains off, yet
  5106. * with a CDCLK frequency other than the minimum. To account
  5107. * for this take the PIPE-A power domain, which covers the HW
  5108. * blocks needed for the following programming. This can be
  5109. * removed once it's guaranteed that we get here either with
  5110. * the minimum CDCLK set, or the required power domains
  5111. * enabled.
  5112. */
  5113. intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
  5114. if (IS_CHERRYVIEW(dev))
  5115. cherryview_set_cdclk(dev, req_cdclk);
  5116. else
  5117. valleyview_set_cdclk(dev, req_cdclk);
  5118. vlv_program_pfi_credits(dev_priv);
  5119. intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
  5120. }
  5121. static void valleyview_crtc_enable(struct drm_crtc *crtc)
  5122. {
  5123. struct drm_device *dev = crtc->dev;
  5124. struct drm_i915_private *dev_priv = to_i915(dev);
  5125. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5126. struct intel_encoder *encoder;
  5127. struct intel_crtc_state *pipe_config =
  5128. to_intel_crtc_state(crtc->state);
  5129. int pipe = intel_crtc->pipe;
  5130. if (WARN_ON(intel_crtc->active))
  5131. return;
  5132. if (intel_crtc->config->has_dp_encoder)
  5133. intel_dp_set_m_n(intel_crtc, M1_N1);
  5134. intel_set_pipe_timings(intel_crtc);
  5135. intel_set_pipe_src_size(intel_crtc);
  5136. if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
  5137. struct drm_i915_private *dev_priv = dev->dev_private;
  5138. I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
  5139. I915_WRITE(CHV_CANVAS(pipe), 0);
  5140. }
  5141. i9xx_set_pipeconf(intel_crtc);
  5142. intel_crtc->active = true;
  5143. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5144. for_each_encoder_on_crtc(dev, crtc, encoder)
  5145. if (encoder->pre_pll_enable)
  5146. encoder->pre_pll_enable(encoder);
  5147. if (IS_CHERRYVIEW(dev)) {
  5148. chv_prepare_pll(intel_crtc, intel_crtc->config);
  5149. chv_enable_pll(intel_crtc, intel_crtc->config);
  5150. } else {
  5151. vlv_prepare_pll(intel_crtc, intel_crtc->config);
  5152. vlv_enable_pll(intel_crtc, intel_crtc->config);
  5153. }
  5154. for_each_encoder_on_crtc(dev, crtc, encoder)
  5155. if (encoder->pre_enable)
  5156. encoder->pre_enable(encoder);
  5157. i9xx_pfit_enable(intel_crtc);
  5158. intel_color_load_luts(&pipe_config->base);
  5159. intel_update_watermarks(crtc);
  5160. intel_enable_pipe(intel_crtc);
  5161. assert_vblank_disabled(crtc);
  5162. drm_crtc_vblank_on(crtc);
  5163. for_each_encoder_on_crtc(dev, crtc, encoder)
  5164. encoder->enable(encoder);
  5165. }
  5166. static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
  5167. {
  5168. struct drm_device *dev = crtc->base.dev;
  5169. struct drm_i915_private *dev_priv = dev->dev_private;
  5170. I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
  5171. I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
  5172. }
  5173. static void i9xx_crtc_enable(struct drm_crtc *crtc)
  5174. {
  5175. struct drm_device *dev = crtc->dev;
  5176. struct drm_i915_private *dev_priv = to_i915(dev);
  5177. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5178. struct intel_encoder *encoder;
  5179. struct intel_crtc_state *pipe_config =
  5180. to_intel_crtc_state(crtc->state);
  5181. enum pipe pipe = intel_crtc->pipe;
  5182. if (WARN_ON(intel_crtc->active))
  5183. return;
  5184. i9xx_set_pll_dividers(intel_crtc);
  5185. if (intel_crtc->config->has_dp_encoder)
  5186. intel_dp_set_m_n(intel_crtc, M1_N1);
  5187. intel_set_pipe_timings(intel_crtc);
  5188. intel_set_pipe_src_size(intel_crtc);
  5189. i9xx_set_pipeconf(intel_crtc);
  5190. intel_crtc->active = true;
  5191. if (!IS_GEN2(dev))
  5192. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
  5193. for_each_encoder_on_crtc(dev, crtc, encoder)
  5194. if (encoder->pre_enable)
  5195. encoder->pre_enable(encoder);
  5196. i9xx_enable_pll(intel_crtc);
  5197. i9xx_pfit_enable(intel_crtc);
  5198. intel_color_load_luts(&pipe_config->base);
  5199. intel_update_watermarks(crtc);
  5200. intel_enable_pipe(intel_crtc);
  5201. assert_vblank_disabled(crtc);
  5202. drm_crtc_vblank_on(crtc);
  5203. for_each_encoder_on_crtc(dev, crtc, encoder)
  5204. encoder->enable(encoder);
  5205. }
  5206. static void i9xx_pfit_disable(struct intel_crtc *crtc)
  5207. {
  5208. struct drm_device *dev = crtc->base.dev;
  5209. struct drm_i915_private *dev_priv = dev->dev_private;
  5210. if (!crtc->config->gmch_pfit.control)
  5211. return;
  5212. assert_pipe_disabled(dev_priv, crtc->pipe);
  5213. DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
  5214. I915_READ(PFIT_CONTROL));
  5215. I915_WRITE(PFIT_CONTROL, 0);
  5216. }
  5217. static void i9xx_crtc_disable(struct drm_crtc *crtc)
  5218. {
  5219. struct drm_device *dev = crtc->dev;
  5220. struct drm_i915_private *dev_priv = dev->dev_private;
  5221. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5222. struct intel_encoder *encoder;
  5223. int pipe = intel_crtc->pipe;
  5224. /*
  5225. * On gen2 planes are double buffered but the pipe isn't, so we must
  5226. * wait for planes to fully turn off before disabling the pipe.
  5227. */
  5228. if (IS_GEN2(dev))
  5229. intel_wait_for_vblank(dev, pipe);
  5230. for_each_encoder_on_crtc(dev, crtc, encoder)
  5231. encoder->disable(encoder);
  5232. drm_crtc_vblank_off(crtc);
  5233. assert_vblank_disabled(crtc);
  5234. intel_disable_pipe(intel_crtc);
  5235. i9xx_pfit_disable(intel_crtc);
  5236. for_each_encoder_on_crtc(dev, crtc, encoder)
  5237. if (encoder->post_disable)
  5238. encoder->post_disable(encoder);
  5239. if (!intel_crtc->config->has_dsi_encoder) {
  5240. if (IS_CHERRYVIEW(dev))
  5241. chv_disable_pll(dev_priv, pipe);
  5242. else if (IS_VALLEYVIEW(dev))
  5243. vlv_disable_pll(dev_priv, pipe);
  5244. else
  5245. i9xx_disable_pll(intel_crtc);
  5246. }
  5247. for_each_encoder_on_crtc(dev, crtc, encoder)
  5248. if (encoder->post_pll_disable)
  5249. encoder->post_pll_disable(encoder);
  5250. if (!IS_GEN2(dev))
  5251. intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
  5252. }
  5253. static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
  5254. {
  5255. struct intel_encoder *encoder;
  5256. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  5257. struct drm_i915_private *dev_priv = to_i915(crtc->dev);
  5258. enum intel_display_power_domain domain;
  5259. unsigned long domains;
  5260. if (!intel_crtc->active)
  5261. return;
  5262. if (to_intel_plane_state(crtc->primary->state)->visible) {
  5263. WARN_ON(intel_crtc->unpin_work);
  5264. intel_pre_disable_primary_noatomic(crtc);
  5265. intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
  5266. to_intel_plane_state(crtc->primary->state)->visible = false;
  5267. }
  5268. dev_priv->display.crtc_disable(crtc);
  5269. DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
  5270. crtc->base.id);
  5271. WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
  5272. crtc->state->active = false;
  5273. intel_crtc->active = false;
  5274. crtc->enabled = false;
  5275. crtc->state->connector_mask = 0;
  5276. crtc->state->encoder_mask = 0;
  5277. for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
  5278. encoder->base.crtc = NULL;
  5279. intel_fbc_disable(intel_crtc);
  5280. intel_update_watermarks(crtc);
  5281. intel_disable_shared_dpll(intel_crtc);
  5282. domains = intel_crtc->enabled_power_domains;
  5283. for_each_power_domain(domain, domains)
  5284. intel_display_power_put(dev_priv, domain);
  5285. intel_crtc->enabled_power_domains = 0;
  5286. dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
  5287. dev_priv->min_pixclk[intel_crtc->pipe] = 0;
  5288. }
  5289. /*
5290. * Turn all CRTCs off, but do not adjust state.
  5291. * This has to be paired with a call to intel_modeset_setup_hw_state.
  5292. */
  5293. int intel_display_suspend(struct drm_device *dev)
  5294. {
  5295. struct drm_i915_private *dev_priv = to_i915(dev);
  5296. struct drm_atomic_state *state;
  5297. int ret;
  5298. state = drm_atomic_helper_suspend(dev);
  5299. ret = PTR_ERR_OR_ZERO(state);
  5300. if (ret)
  5301. DRM_ERROR("Suspending crtc's failed with %i\n", ret);
  5302. else
  5303. dev_priv->modeset_restore_state = state;
  5304. return ret;
  5305. }
  5306. void intel_encoder_destroy(struct drm_encoder *encoder)
  5307. {
  5308. struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
  5309. drm_encoder_cleanup(encoder);
  5310. kfree(intel_encoder);
  5311. }
5312. /* Cross check the actual hw state with our own modeset state tracking (and its
  5313. * internal consistency). */
  5314. static void intel_connector_verify_state(struct intel_connector *connector)
  5315. {
  5316. struct drm_crtc *crtc = connector->base.state->crtc;
  5317. DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
  5318. connector->base.base.id,
  5319. connector->base.name);
  5320. if (connector->get_hw_state(connector)) {
  5321. struct intel_encoder *encoder = connector->encoder;
  5322. struct drm_connector_state *conn_state = connector->base.state;
  5323. I915_STATE_WARN(!crtc,
  5324. "connector enabled without attached crtc\n");
  5325. if (!crtc)
  5326. return;
  5327. I915_STATE_WARN(!crtc->state->active,
  5328. "connector is active, but attached crtc isn't\n");
  5329. if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
  5330. return;
  5331. I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
  5332. "atomic encoder doesn't match attached encoder\n");
  5333. I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
  5334. "attached encoder crtc differs from connector crtc\n");
  5335. } else {
  5336. I915_STATE_WARN(crtc && crtc->state->active,
  5337. "attached crtc is active, but connector isn't\n");
  5338. I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
  5339. "best encoder set without crtc!\n");
  5340. }
  5341. }
  5342. int intel_connector_init(struct intel_connector *connector)
  5343. {
  5344. drm_atomic_helper_connector_reset(&connector->base);
  5345. if (!connector->base.state)
  5346. return -ENOMEM;
  5347. return 0;
  5348. }
  5349. struct intel_connector *intel_connector_alloc(void)
  5350. {
  5351. struct intel_connector *connector;
  5352. connector = kzalloc(sizeof *connector, GFP_KERNEL);
  5353. if (!connector)
  5354. return NULL;
  5355. if (intel_connector_init(connector) < 0) {
  5356. kfree(connector);
  5357. return NULL;
  5358. }
  5359. return connector;
  5360. }
  5361. /* Simple connector->get_hw_state implementation for encoders that support only
  5362. * one connector and no cloning and hence the encoder state determines the state
  5363. * of the connector. */
  5364. bool intel_connector_get_hw_state(struct intel_connector *connector)
  5365. {
  5366. enum pipe pipe = 0;
  5367. struct intel_encoder *encoder = connector->encoder;
  5368. return encoder->get_hw_state(encoder, &pipe);
  5369. }
  5370. static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  5371. {
  5372. if (crtc_state->base.enable && crtc_state->has_pch_encoder)
  5373. return crtc_state->fdi_lanes;
  5374. return 0;
  5375. }
  5376. static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
  5377. struct intel_crtc_state *pipe_config)
  5378. {
  5379. struct drm_atomic_state *state = pipe_config->base.state;
  5380. struct intel_crtc *other_crtc;
  5381. struct intel_crtc_state *other_crtc_state;
  5382. DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
  5383. pipe_name(pipe), pipe_config->fdi_lanes);
  5384. if (pipe_config->fdi_lanes > 4) {
  5385. DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
  5386. pipe_name(pipe), pipe_config->fdi_lanes);
  5387. return -EINVAL;
  5388. }
  5389. if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  5390. if (pipe_config->fdi_lanes > 2) {
  5391. DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
  5392. pipe_config->fdi_lanes);
  5393. return -EINVAL;
  5394. } else {
  5395. return 0;
  5396. }
  5397. }
  5398. if (INTEL_INFO(dev)->num_pipes == 2)
  5399. return 0;
  5400. /* Ivybridge 3 pipe is really complicated */
  5401. switch (pipe) {
  5402. case PIPE_A:
  5403. return 0;
  5404. case PIPE_B:
  5405. if (pipe_config->fdi_lanes <= 2)
  5406. return 0;
  5407. other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
  5408. other_crtc_state =
  5409. intel_atomic_get_crtc_state(state, other_crtc);
  5410. if (IS_ERR(other_crtc_state))
  5411. return PTR_ERR(other_crtc_state);
  5412. if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
  5413. DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
  5414. pipe_name(pipe), pipe_config->fdi_lanes);
  5415. return -EINVAL;
  5416. }
  5417. return 0;
  5418. case PIPE_C:
  5419. if (pipe_config->fdi_lanes > 2) {
  5420. DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
  5421. pipe_name(pipe), pipe_config->fdi_lanes);
  5422. return -EINVAL;
  5423. }
  5424. other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
  5425. other_crtc_state =
  5426. intel_atomic_get_crtc_state(state, other_crtc);
  5427. if (IS_ERR(other_crtc_state))
  5428. return PTR_ERR(other_crtc_state);
  5429. if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
  5430. DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
  5431. return -EINVAL;
  5432. }
  5433. return 0;
  5434. default:
  5435. BUG();
  5436. }
  5437. }
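/*
 * Reading of the checks above (a summary, not additional hardware data):
 * pipe A always passes, while pipes B and C effectively share an FDI lane
 * budget on Ivybridge -- pipe B may use more than 2 lanes only if pipe C
 * needs none, and pipe C (itself capped at 2 lanes) is only valid while
 * pipe B stays at 2 lanes or fewer.  Haswell/Broadwell are simpler: the
 * FDI link there never exceeds 2 lanes, hence the early bail-out.
 */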
  5438. #define RETRY 1
  5439. static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
  5440. struct intel_crtc_state *pipe_config)
  5441. {
  5442. struct drm_device *dev = intel_crtc->base.dev;
  5443. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  5444. int lane, link_bw, fdi_dotclock, ret;
  5445. bool needs_recompute = false;
  5446. retry:
  5447. /* FDI is a binary signal running at ~2.7GHz, encoding
  5448. * each output octet as 10 bits. The actual frequency
  5449. * is stored as a divider into a 100MHz clock, and the
  5450. * mode pixel clock is stored in units of 1KHz.
  5451. * Hence the bw of each lane in terms of the mode signal
  5452. * is:
  5453. */
  5454. link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
  5455. fdi_dotclock = adjusted_mode->crtc_clock;
  5456. lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
  5457. pipe_config->pipe_bpp);
  5458. pipe_config->fdi_lanes = lane;
  5459. intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
  5460. link_bw, &pipe_config->fdi_m_n);
  5461. ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
  5462. if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
  5463. pipe_config->pipe_bpp -= 2*3;
  5464. DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
  5465. pipe_config->pipe_bpp);
  5466. needs_recompute = true;
  5467. pipe_config->bw_constrained = true;
  5468. goto retry;
  5469. }
  5470. if (needs_recompute)
  5471. return RETRY;
  5472. return ret;
  5473. }
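/*
 * Worked example for the retry loop above (figures are illustrative and
 * assume the usual ~5% spread-spectrum margin applied by
 * ironlake_get_lanes_required): a 1920x1080@60 mode has a dotclock of
 * roughly 148500 kHz; at 24 bpp on a 270000 kHz FDI link that is about
 * 148500 * 24 * 1.05 / (270000 * 8) ~= 1.7, i.e. 2 FDI lanes.  If the lane
 * check still fails with -EINVAL, pipe_bpp is stepped down by 2 bits per
 * channel (6 bits total) per iteration; once it reaches 6 bpc the error is
 * returned to the caller instead.
 */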
  5474. static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
  5475. struct intel_crtc_state *pipe_config)
  5476. {
  5477. if (pipe_config->pipe_bpp > 24)
  5478. return false;
  5479. /* HSW can handle pixel rate up to cdclk? */
  5480. if (IS_HASWELL(dev_priv))
  5481. return true;
  5482. /*
  5483. * We compare against max which means we must take
  5484. * the increased cdclk requirement into account when
  5485. * calculating the new cdclk.
  5486. *
  5487. * Should measure whether using a lower cdclk w/o IPS
  5488. */
  5489. return ilk_pipe_pixel_rate(pipe_config) <=
  5490. dev_priv->max_cdclk_freq * 95 / 100;
  5491. }
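/*
 * Example of the Broadwell limit above: with a maximum cdclk of 540000 kHz,
 * IPS stays available as long as the pipe pixel rate is at or below
 * 540000 * 95 / 100 = 513000 kHz.  Anything faster disables IPS (or forces
 * a higher cdclk), since IPS itself raises the cdclk requirement as the
 * comment notes.
 */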
  5492. static void hsw_compute_ips_config(struct intel_crtc *crtc,
  5493. struct intel_crtc_state *pipe_config)
  5494. {
  5495. struct drm_device *dev = crtc->base.dev;
  5496. struct drm_i915_private *dev_priv = dev->dev_private;
  5497. pipe_config->ips_enabled = i915.enable_ips &&
  5498. hsw_crtc_supports_ips(crtc) &&
  5499. pipe_config_supports_ips(dev_priv, pipe_config);
  5500. }
  5501. static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
  5502. {
  5503. const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  5504. /* GDG double wide on either pipe, otherwise pipe A only */
  5505. return INTEL_INFO(dev_priv)->gen < 4 &&
  5506. (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
  5507. }
  5508. static int intel_crtc_compute_config(struct intel_crtc *crtc,
  5509. struct intel_crtc_state *pipe_config)
  5510. {
  5511. struct drm_device *dev = crtc->base.dev;
  5512. struct drm_i915_private *dev_priv = dev->dev_private;
  5513. const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
  5514. /* FIXME should check pixel clock limits on all platforms */
  5515. if (INTEL_INFO(dev)->gen < 4) {
  5516. int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
  5517. /*
  5518. * Enable double wide mode when the dot clock
  5519. * is > 90% of the (display) core speed.
  5520. */
  5521. if (intel_crtc_supports_double_wide(crtc) &&
  5522. adjusted_mode->crtc_clock > clock_limit) {
  5523. clock_limit *= 2;
  5524. pipe_config->double_wide = true;
  5525. }
  5526. if (adjusted_mode->crtc_clock > clock_limit) {
  5527. DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
  5528. adjusted_mode->crtc_clock, clock_limit,
  5529. yesno(pipe_config->double_wide));
  5530. return -EINVAL;
  5531. }
  5532. }
  5533. /*
  5534. * Pipe horizontal size must be even in:
  5535. * - DVO ganged mode
  5536. * - LVDS dual channel mode
  5537. * - Double wide pipe
  5538. */
  5539. if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
  5540. intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
  5541. pipe_config->pipe_src_w &= ~1;
  5542. /* Cantiga+ cannot handle modes with a hsync front porch of 0.
  5543. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
  5544. */
  5545. if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
  5546. adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
  5547. return -EINVAL;
  5548. if (HAS_IPS(dev))
  5549. hsw_compute_ips_config(crtc, pipe_config);
  5550. if (pipe_config->has_pch_encoder)
  5551. return ironlake_fdi_compute_config(crtc, pipe_config);
  5552. return 0;
  5553. }
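/*
 * Double-wide example for the gen < 4 path above (illustrative numbers):
 * with a 320000 kHz cdclk the single-wide limit is 320000 * 9 / 10 =
 * 288000 kHz.  A 300000 kHz dotclock on a pipe that supports double-wide
 * therefore sets double_wide and doubles the limit to 576000 kHz; only
 * dotclocks beyond that are rejected with -EINVAL.
 */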
  5554. static int skylake_get_display_clock_speed(struct drm_device *dev)
  5555. {
  5556. struct drm_i915_private *dev_priv = to_i915(dev);
  5557. uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
  5558. uint32_t cdctl = I915_READ(CDCLK_CTL);
  5559. uint32_t linkrate;
  5560. if (!(lcpll1 & LCPLL_PLL_ENABLE))
  5561. return 24000; /* 24MHz is the cd freq with NSSC ref */
  5562. if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
  5563. return 540000;
  5564. linkrate = (I915_READ(DPLL_CTRL1) &
  5565. DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
  5566. if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
  5567. linkrate == DPLL_CTRL1_LINK_RATE_1080) {
  5568. /* vco 8640 */
  5569. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  5570. case CDCLK_FREQ_450_432:
  5571. return 432000;
  5572. case CDCLK_FREQ_337_308:
  5573. return 308570;
  5574. case CDCLK_FREQ_675_617:
  5575. return 617140;
  5576. default:
  5577. WARN(1, "Unknown cd freq selection\n");
  5578. }
  5579. } else {
  5580. /* vco 8100 */
  5581. switch (cdctl & CDCLK_FREQ_SEL_MASK) {
  5582. case CDCLK_FREQ_450_432:
  5583. return 450000;
  5584. case CDCLK_FREQ_337_308:
  5585. return 337500;
  5586. case CDCLK_FREQ_675_617:
  5587. return 675000;
  5588. default:
  5589. WARN(1, "Unknown cd freq selection\n");
  5590. }
  5591. }
  5592. /* error case, do as if DPLL0 isn't enabled */
  5593. return 24000;
  5594. }
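/*
 * The two branches above follow from the DPLL0 VCO: the returned cdclk
 * values are (roughly) integer divisions of an 8640 MHz VCO (8640/20 = 432,
 * 8640/28 ~= 308.57, 8640/14 ~= 617.14 MHz) or of an 8100 MHz VCO
 * (8100/18 = 450, 8100/24 = 337.5, 8100/12 = 675 MHz), with the 540 MHz
 * selection handled before the VCO is examined.
 */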
  5595. static int broxton_get_display_clock_speed(struct drm_device *dev)
  5596. {
  5597. struct drm_i915_private *dev_priv = to_i915(dev);
  5598. uint32_t cdctl = I915_READ(CDCLK_CTL);
  5599. uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
  5600. uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
  5601. int cdclk;
  5602. if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
  5603. return 19200;
  5604. cdclk = 19200 * pll_ratio / 2;
  5605. switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
  5606. case BXT_CDCLK_CD2X_DIV_SEL_1:
  5607. return cdclk; /* 576MHz or 624MHz */
  5608. case BXT_CDCLK_CD2X_DIV_SEL_1_5:
  5609. return cdclk * 2 / 3; /* 384MHz */
  5610. case BXT_CDCLK_CD2X_DIV_SEL_2:
  5611. return cdclk / 2; /* 288MHz */
  5612. case BXT_CDCLK_CD2X_DIV_SEL_4:
  5613. return cdclk / 4; /* 144MHz */
  5614. }
  5615. /* error case, do as if DE PLL isn't enabled */
  5616. return 19200;
  5617. }
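/*
 * Example for the Broxton path above: the DE PLL runs at 19200 kHz times
 * the programmed ratio, and the CD2X base clock is half of that.  A ratio
 * of 60 gives 19200 * 60 / 2 = 576000 kHz and a ratio of 65 gives
 * 624000 kHz; the CD2X divider field then selects /1, /1.5, /2 or /4 of
 * that base frequency.
 */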
  5618. static int broadwell_get_display_clock_speed(struct drm_device *dev)
  5619. {
  5620. struct drm_i915_private *dev_priv = dev->dev_private;
  5621. uint32_t lcpll = I915_READ(LCPLL_CTL);
  5622. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  5623. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  5624. return 800000;
  5625. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  5626. return 450000;
  5627. else if (freq == LCPLL_CLK_FREQ_450)
  5628. return 450000;
  5629. else if (freq == LCPLL_CLK_FREQ_54O_BDW)
  5630. return 540000;
  5631. else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
  5632. return 337500;
  5633. else
  5634. return 675000;
  5635. }
  5636. static int haswell_get_display_clock_speed(struct drm_device *dev)
  5637. {
  5638. struct drm_i915_private *dev_priv = dev->dev_private;
  5639. uint32_t lcpll = I915_READ(LCPLL_CTL);
  5640. uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
  5641. if (lcpll & LCPLL_CD_SOURCE_FCLK)
  5642. return 800000;
  5643. else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
  5644. return 450000;
  5645. else if (freq == LCPLL_CLK_FREQ_450)
  5646. return 450000;
  5647. else if (IS_HSW_ULT(dev))
  5648. return 337500;
  5649. else
  5650. return 540000;
  5651. }
  5652. static int valleyview_get_display_clock_speed(struct drm_device *dev)
  5653. {
  5654. return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
  5655. CCK_DISPLAY_CLOCK_CONTROL);
  5656. }
  5657. static int ilk_get_display_clock_speed(struct drm_device *dev)
  5658. {
  5659. return 450000;
  5660. }
  5661. static int i945_get_display_clock_speed(struct drm_device *dev)
  5662. {
  5663. return 400000;
  5664. }
  5665. static int i915_get_display_clock_speed(struct drm_device *dev)
  5666. {
  5667. return 333333;
  5668. }
  5669. static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
  5670. {
  5671. return 200000;
  5672. }
  5673. static int pnv_get_display_clock_speed(struct drm_device *dev)
  5674. {
  5675. u16 gcfgc = 0;
  5676. pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
  5677. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  5678. case GC_DISPLAY_CLOCK_267_MHZ_PNV:
  5679. return 266667;
  5680. case GC_DISPLAY_CLOCK_333_MHZ_PNV:
  5681. return 333333;
  5682. case GC_DISPLAY_CLOCK_444_MHZ_PNV:
  5683. return 444444;
  5684. case GC_DISPLAY_CLOCK_200_MHZ_PNV:
  5685. return 200000;
  5686. default:
  5687. DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
  5688. case GC_DISPLAY_CLOCK_133_MHZ_PNV:
  5689. return 133333;
  5690. case GC_DISPLAY_CLOCK_167_MHZ_PNV:
  5691. return 166667;
  5692. }
  5693. }
  5694. static int i915gm_get_display_clock_speed(struct drm_device *dev)
  5695. {
  5696. u16 gcfgc = 0;
  5697. pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
  5698. if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
  5699. return 133333;
  5700. else {
  5701. switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
  5702. case GC_DISPLAY_CLOCK_333_MHZ:
  5703. return 333333;
  5704. default:
  5705. case GC_DISPLAY_CLOCK_190_200_MHZ:
  5706. return 190000;
  5707. }
  5708. }
  5709. }
  5710. static int i865_get_display_clock_speed(struct drm_device *dev)
  5711. {
  5712. return 266667;
  5713. }
  5714. static int i85x_get_display_clock_speed(struct drm_device *dev)
  5715. {
  5716. u16 hpllcc = 0;
  5717. /*
  5718. * 852GM/852GMV only supports 133 MHz and the HPLLCC
  5719. * encoding is different :(
  5720. * FIXME is this the right way to detect 852GM/852GMV?
  5721. */
  5722. if (dev->pdev->revision == 0x1)
  5723. return 133333;
  5724. pci_bus_read_config_word(dev->pdev->bus,
  5725. PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
  5726. /* Assume that the hardware is in the high speed state. This
  5727. * should be the default.
  5728. */
  5729. switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
  5730. case GC_CLOCK_133_200:
  5731. case GC_CLOCK_133_200_2:
  5732. case GC_CLOCK_100_200:
  5733. return 200000;
  5734. case GC_CLOCK_166_250:
  5735. return 250000;
  5736. case GC_CLOCK_100_133:
  5737. return 133333;
  5738. case GC_CLOCK_133_266:
  5739. case GC_CLOCK_133_266_2:
  5740. case GC_CLOCK_166_266:
  5741. return 266667;
  5742. }
  5743. /* Shouldn't happen */
  5744. return 0;
  5745. }
  5746. static int i830_get_display_clock_speed(struct drm_device *dev)
  5747. {
  5748. return 133333;
  5749. }
  5750. static unsigned int intel_hpll_vco(struct drm_device *dev)
  5751. {
  5752. struct drm_i915_private *dev_priv = dev->dev_private;
  5753. static const unsigned int blb_vco[8] = {
  5754. [0] = 3200000,
  5755. [1] = 4000000,
  5756. [2] = 5333333,
  5757. [3] = 4800000,
  5758. [4] = 6400000,
  5759. };
  5760. static const unsigned int pnv_vco[8] = {
  5761. [0] = 3200000,
  5762. [1] = 4000000,
  5763. [2] = 5333333,
  5764. [3] = 4800000,
  5765. [4] = 2666667,
  5766. };
  5767. static const unsigned int cl_vco[8] = {
  5768. [0] = 3200000,
  5769. [1] = 4000000,
  5770. [2] = 5333333,
  5771. [3] = 6400000,
  5772. [4] = 3333333,
  5773. [5] = 3566667,
  5774. [6] = 4266667,
  5775. };
  5776. static const unsigned int elk_vco[8] = {
  5777. [0] = 3200000,
  5778. [1] = 4000000,
  5779. [2] = 5333333,
  5780. [3] = 4800000,
  5781. };
  5782. static const unsigned int ctg_vco[8] = {
  5783. [0] = 3200000,
  5784. [1] = 4000000,
  5785. [2] = 5333333,
  5786. [3] = 6400000,
  5787. [4] = 2666667,
  5788. [5] = 4266667,
  5789. };
  5790. const unsigned int *vco_table;
  5791. unsigned int vco;
  5792. uint8_t tmp = 0;
  5793. /* FIXME other chipsets? */
  5794. if (IS_GM45(dev))
  5795. vco_table = ctg_vco;
  5796. else if (IS_G4X(dev))
  5797. vco_table = elk_vco;
  5798. else if (IS_CRESTLINE(dev))
  5799. vco_table = cl_vco;
  5800. else if (IS_PINEVIEW(dev))
  5801. vco_table = pnv_vco;
  5802. else if (IS_G33(dev))
  5803. vco_table = blb_vco;
  5804. else
  5805. return 0;
  5806. tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
  5807. vco = vco_table[tmp & 0x7];
  5808. if (vco == 0)
  5809. DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
  5810. else
  5811. DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
  5812. return vco;
  5813. }
  5814. static int gm45_get_display_clock_speed(struct drm_device *dev)
  5815. {
  5816. unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
  5817. uint16_t tmp = 0;
  5818. pci_read_config_word(dev->pdev, GCFGC, &tmp);
  5819. cdclk_sel = (tmp >> 12) & 0x1;
  5820. switch (vco) {
  5821. case 2666667:
  5822. case 4000000:
  5823. case 5333333:
  5824. return cdclk_sel ? 333333 : 222222;
  5825. case 3200000:
  5826. return cdclk_sel ? 320000 : 228571;
  5827. default:
  5828. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
  5829. return 222222;
  5830. }
  5831. }
  5832. static int i965gm_get_display_clock_speed(struct drm_device *dev)
  5833. {
  5834. static const uint8_t div_3200[] = { 16, 10, 8 };
  5835. static const uint8_t div_4000[] = { 20, 12, 10 };
  5836. static const uint8_t div_5333[] = { 24, 16, 14 };
  5837. const uint8_t *div_table;
  5838. unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
  5839. uint16_t tmp = 0;
  5840. pci_read_config_word(dev->pdev, GCFGC, &tmp);
  5841. cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
  5842. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  5843. goto fail;
  5844. switch (vco) {
  5845. case 3200000:
  5846. div_table = div_3200;
  5847. break;
  5848. case 4000000:
  5849. div_table = div_4000;
  5850. break;
  5851. case 5333333:
  5852. div_table = div_5333;
  5853. break;
  5854. default:
  5855. goto fail;
  5856. }
  5857. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  5858. fail:
  5859. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
  5860. return 200000;
  5861. }
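/*
 * Example for the divider tables above: with a 5333333 kHz HPLL VCO and a
 * GCFGC selector of 0 the divider is 24, giving
 * DIV_ROUND_CLOSEST(5333333, 24) ~= 222222 kHz; selector 2 picks /14 for
 * roughly 380952 kHz.  Unknown selectors or VCOs fall back to the
 * conservative 200000 kHz guess in the fail path.
 */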
  5862. static int g33_get_display_clock_speed(struct drm_device *dev)
  5863. {
  5864. static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
  5865. static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
  5866. static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
  5867. static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
  5868. const uint8_t *div_table;
  5869. unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
  5870. uint16_t tmp = 0;
  5871. pci_read_config_word(dev->pdev, GCFGC, &tmp);
  5872. cdclk_sel = (tmp >> 4) & 0x7;
  5873. if (cdclk_sel >= ARRAY_SIZE(div_3200))
  5874. goto fail;
  5875. switch (vco) {
  5876. case 3200000:
  5877. div_table = div_3200;
  5878. break;
  5879. case 4000000:
  5880. div_table = div_4000;
  5881. break;
  5882. case 4800000:
  5883. div_table = div_4800;
  5884. break;
  5885. case 5333333:
  5886. div_table = div_5333;
  5887. break;
  5888. default:
  5889. goto fail;
  5890. }
  5891. return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
  5892. fail:
  5893. DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
  5894. return 190476;
  5895. }
  5896. static void
  5897. intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
  5898. {
  5899. while (*num > DATA_LINK_M_N_MASK ||
  5900. *den > DATA_LINK_M_N_MASK) {
  5901. *num >>= 1;
  5902. *den >>= 1;
  5903. }
  5904. }
  5905. static void compute_m_n(unsigned int m, unsigned int n,
  5906. uint32_t *ret_m, uint32_t *ret_n)
  5907. {
  5908. *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
  5909. *ret_m = div_u64((uint64_t) m * *ret_n, n);
  5910. intel_reduce_m_n_ratio(ret_m, ret_n);
  5911. }
  5912. void
  5913. intel_link_compute_m_n(int bits_per_pixel, int nlanes,
  5914. int pixel_clock, int link_clock,
  5915. struct intel_link_m_n *m_n)
  5916. {
  5917. m_n->tu = 64;
  5918. compute_m_n(bits_per_pixel * pixel_clock,
  5919. link_clock * nlanes * 8,
  5920. &m_n->gmch_m, &m_n->gmch_n);
  5921. compute_m_n(pixel_clock, link_clock,
  5922. &m_n->link_m, &m_n->link_n);
  5923. }
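/*
 * Illustration of the two M/N ratios computed above (numbers approximate):
 * for a 148500 kHz dotclock at 24 bpp on a 4-lane 270000 kHz DP link, the
 * data M/N ratio is 148500 * 24 / (270000 * 4 * 8) = 0.4125 (payload vs.
 * total link bandwidth) and the link M/N ratio is 148500 / 270000 = 0.55
 * (dotclock vs. link symbol clock).  compute_m_n() only scales each ratio
 * so that N fits the register field (capped at DATA_LINK_N_MAX) and M
 * follows proportionally.
 */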
  5924. static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  5925. {
  5926. if (i915.panel_use_ssc >= 0)
  5927. return i915.panel_use_ssc != 0;
  5928. return dev_priv->vbt.lvds_use_ssc
  5929. && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  5930. }
  5931. static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
  5932. {
  5933. return (1 << dpll->n) << 16 | dpll->m2;
  5934. }
  5935. static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
  5936. {
  5937. return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
  5938. }
  5939. static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
  5940. struct intel_crtc_state *crtc_state,
  5941. intel_clock_t *reduced_clock)
  5942. {
  5943. struct drm_device *dev = crtc->base.dev;
  5944. u32 fp, fp2 = 0;
  5945. if (IS_PINEVIEW(dev)) {
  5946. fp = pnv_dpll_compute_fp(&crtc_state->dpll);
  5947. if (reduced_clock)
  5948. fp2 = pnv_dpll_compute_fp(reduced_clock);
  5949. } else {
  5950. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  5951. if (reduced_clock)
  5952. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  5953. }
  5954. crtc_state->dpll_hw_state.fp0 = fp;
  5955. crtc->lowfreq_avail = false;
  5956. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  5957. reduced_clock) {
  5958. crtc_state->dpll_hw_state.fp1 = fp2;
  5959. crtc->lowfreq_avail = true;
  5960. } else {
  5961. crtc_state->dpll_hw_state.fp1 = fp;
  5962. }
  5963. }
  5964. static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
  5965. pipe)
  5966. {
  5967. u32 reg_val;
  5968. /*
  5969. * PLLB opamp always calibrates to max value of 0x3f, force enable it
  5970. * and set it to a reasonable value instead.
  5971. */
  5972. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  5973. reg_val &= 0xffffff00;
  5974. reg_val |= 0x00000030;
  5975. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  5976. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5977. reg_val &= 0x00ffffff;
5978. reg_val |= 0x8c000000;
  5979. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  5980. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
  5981. reg_val &= 0xffffff00;
  5982. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  5983. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
  5984. reg_val &= 0x00ffffff;
  5985. reg_val |= 0xb0000000;
  5986. vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  5987. }
  5988. static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
  5989. struct intel_link_m_n *m_n)
  5990. {
  5991. struct drm_device *dev = crtc->base.dev;
  5992. struct drm_i915_private *dev_priv = dev->dev_private;
  5993. int pipe = crtc->pipe;
  5994. I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  5995. I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
  5996. I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
  5997. I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  5998. }
  5999. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  6000. struct intel_link_m_n *m_n,
  6001. struct intel_link_m_n *m2_n2)
  6002. {
  6003. struct drm_device *dev = crtc->base.dev;
  6004. struct drm_i915_private *dev_priv = dev->dev_private;
  6005. int pipe = crtc->pipe;
  6006. enum transcoder transcoder = crtc->config->cpu_transcoder;
  6007. if (INTEL_INFO(dev)->gen >= 5) {
  6008. I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6009. I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  6010. I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
  6011. I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
  6012. /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
  6013. * for gen < 8) and if DRRS is supported (to make sure the
  6014. * registers are not unnecessarily accessed).
  6015. */
  6016. if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
  6017. crtc->config->has_drrs) {
  6018. I915_WRITE(PIPE_DATA_M2(transcoder),
  6019. TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  6020. I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
  6021. I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
  6022. I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
  6023. }
  6024. } else {
  6025. I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6026. I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
  6027. I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
  6028. I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
  6029. }
  6030. }
  6031. void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
  6032. {
  6033. struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  6034. if (m_n == M1_N1) {
  6035. dp_m_n = &crtc->config->dp_m_n;
  6036. dp_m2_n2 = &crtc->config->dp_m2_n2;
  6037. } else if (m_n == M2_N2) {
  6038. /*
  6039. * M2_N2 registers are not supported. Hence m2_n2 divider value
  6040. * needs to be programmed into M1_N1.
  6041. */
  6042. dp_m_n = &crtc->config->dp_m2_n2;
  6043. } else {
  6044. DRM_ERROR("Unsupported divider value\n");
  6045. return;
  6046. }
  6047. if (crtc->config->has_pch_encoder)
  6048. intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
  6049. else
  6050. intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
  6051. }
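/*
 * Note on the selector above: dp_m_n carries the link M/N values for the
 * normal refresh rate and dp_m2_n2 the values for the DRRS downclocked
 * mode.  Where the transcoder has real M2_N2 registers both sets can be
 * programmed; when M2_N2 is requested on hardware without them, the
 * downclocked values are written into the M1_N1 registers instead, as the
 * comment in the M2_N2 branch explains.
 */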
  6052. static void vlv_compute_dpll(struct intel_crtc *crtc,
  6053. struct intel_crtc_state *pipe_config)
  6054. {
  6055. pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
  6056. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6057. if (crtc->pipe != PIPE_A)
  6058. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6059. /* DPLL not used with DSI, but still need the rest set up */
  6060. if (!pipe_config->has_dsi_encoder)
  6061. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
  6062. DPLL_EXT_BUFFER_ENABLE_VLV;
  6063. pipe_config->dpll_hw_state.dpll_md =
  6064. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6065. }
  6066. static void chv_compute_dpll(struct intel_crtc *crtc,
  6067. struct intel_crtc_state *pipe_config)
  6068. {
  6069. pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
  6070. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6071. if (crtc->pipe != PIPE_A)
  6072. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6073. /* DPLL not used with DSI, but still need the rest set up */
  6074. if (!pipe_config->has_dsi_encoder)
  6075. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
  6076. pipe_config->dpll_hw_state.dpll_md =
  6077. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6078. }
  6079. static void vlv_prepare_pll(struct intel_crtc *crtc,
  6080. const struct intel_crtc_state *pipe_config)
  6081. {
  6082. struct drm_device *dev = crtc->base.dev;
  6083. struct drm_i915_private *dev_priv = dev->dev_private;
  6084. enum pipe pipe = crtc->pipe;
  6085. u32 mdiv;
  6086. u32 bestn, bestm1, bestm2, bestp1, bestp2;
  6087. u32 coreclk, reg_val;
  6088. /* Enable Refclk */
  6089. I915_WRITE(DPLL(pipe),
  6090. pipe_config->dpll_hw_state.dpll &
  6091. ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
  6092. /* No need to actually set up the DPLL with DSI */
  6093. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6094. return;
  6095. mutex_lock(&dev_priv->sb_lock);
  6096. bestn = pipe_config->dpll.n;
  6097. bestm1 = pipe_config->dpll.m1;
  6098. bestm2 = pipe_config->dpll.m2;
  6099. bestp1 = pipe_config->dpll.p1;
  6100. bestp2 = pipe_config->dpll.p2;
  6101. /* See eDP HDMI DPIO driver vbios notes doc */
  6102. /* PLL B needs special handling */
  6103. if (pipe == PIPE_B)
  6104. vlv_pllb_recal_opamp(dev_priv, pipe);
  6105. /* Set up Tx target for periodic Rcomp update */
  6106. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  6107. /* Disable target IRef on PLL */
  6108. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
  6109. reg_val &= 0x00ffffff;
  6110. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  6111. /* Disable fast lock */
  6112. vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  6113. /* Set idtafcrecal before PLL is enabled */
  6114. mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
  6115. mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
  6116. mdiv |= ((bestn << DPIO_N_SHIFT));
  6117. mdiv |= (1 << DPIO_K_SHIFT);
  6118. /*
  6119. * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
  6120. * but we don't support that).
  6121. * Note: don't use the DAC post divider as it seems unstable.
  6122. */
  6123. mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
  6124. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6125. mdiv |= DPIO_ENABLE_CALIBRATION;
  6126. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6127. /* Set HBR and RBR LPF coefficients */
  6128. if (pipe_config->port_clock == 162000 ||
  6129. intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
  6130. intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
  6131. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6132. 0x009f0003);
  6133. else
  6134. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6135. 0x00d0000f);
  6136. if (pipe_config->has_dp_encoder) {
  6137. /* Use SSC source */
  6138. if (pipe == PIPE_A)
  6139. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6140. 0x0df40000);
  6141. else
  6142. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6143. 0x0df70000);
  6144. } else { /* HDMI or VGA */
  6145. /* Use bend source */
  6146. if (pipe == PIPE_A)
  6147. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6148. 0x0df70000);
  6149. else
  6150. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6151. 0x0df40000);
  6152. }
  6153. coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
  6154. coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  6155. if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
  6156. intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
  6157. coreclk |= 0x01000000;
  6158. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  6159. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  6160. mutex_unlock(&dev_priv->sb_lock);
  6161. }
  6162. static void chv_prepare_pll(struct intel_crtc *crtc,
  6163. const struct intel_crtc_state *pipe_config)
  6164. {
  6165. struct drm_device *dev = crtc->base.dev;
  6166. struct drm_i915_private *dev_priv = dev->dev_private;
  6167. enum pipe pipe = crtc->pipe;
  6168. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  6169. u32 loopfilter, tribuf_calcntr;
  6170. u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
  6171. u32 dpio_val;
  6172. int vco;
  6173. /* Enable Refclk and SSC */
  6174. I915_WRITE(DPLL(pipe),
  6175. pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
  6176. /* No need to actually set up the DPLL with DSI */
  6177. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6178. return;
  6179. bestn = pipe_config->dpll.n;
  6180. bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
  6181. bestm1 = pipe_config->dpll.m1;
  6182. bestm2 = pipe_config->dpll.m2 >> 22;
  6183. bestp1 = pipe_config->dpll.p1;
  6184. bestp2 = pipe_config->dpll.p2;
  6185. vco = pipe_config->dpll.vco;
  6186. dpio_val = 0;
  6187. loopfilter = 0;
  6188. mutex_lock(&dev_priv->sb_lock);
  6189. /* p1 and p2 divider */
  6190. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
  6191. 5 << DPIO_CHV_S1_DIV_SHIFT |
  6192. bestp1 << DPIO_CHV_P1_DIV_SHIFT |
  6193. bestp2 << DPIO_CHV_P2_DIV_SHIFT |
  6194. 1 << DPIO_CHV_K_DIV_SHIFT);
  6195. /* Feedback post-divider - m2 */
  6196. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
  6197. /* Feedback refclk divider - n and m1 */
  6198. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
  6199. DPIO_CHV_M1_DIV_BY_2 |
  6200. 1 << DPIO_CHV_N_DIV_SHIFT);
  6201. /* M2 fraction division */
  6202. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
  6203. /* M2 fraction division enable */
  6204. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  6205. dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
  6206. dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
  6207. if (bestm2_frac)
  6208. dpio_val |= DPIO_CHV_FRAC_DIV_EN;
  6209. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
  6210. /* Program digital lock detect threshold */
  6211. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
  6212. dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
  6213. DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
  6214. dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
  6215. if (!bestm2_frac)
  6216. dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
  6217. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
  6218. /* Loop filter */
  6219. if (vco == 5400000) {
  6220. loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
  6221. loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
  6222. loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6223. tribuf_calcntr = 0x9;
  6224. } else if (vco <= 6200000) {
  6225. loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
  6226. loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
  6227. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6228. tribuf_calcntr = 0x9;
  6229. } else if (vco <= 6480000) {
  6230. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6231. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6232. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6233. tribuf_calcntr = 0x8;
  6234. } else {
  6235. /* Not supported. Apply the same limits as in the max case */
  6236. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  6237. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  6238. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  6239. tribuf_calcntr = 0;
  6240. }
  6241. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
  6242. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
  6243. dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
  6244. dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
  6245. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
  6246. /* AFC Recal */
  6247. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
  6248. vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
  6249. DPIO_AFC_RECAL);
  6250. mutex_unlock(&dev_priv->sb_lock);
  6251. }
  6252. /**
  6253. * vlv_force_pll_on - forcibly enable just the PLL
  6254. * @dev_priv: i915 private structure
  6255. * @pipe: pipe PLL to enable
  6256. * @dpll: PLL configuration
  6257. *
  6258. * Enable the PLL for @pipe using the supplied @dpll config. To be used
  6259. * in cases where we need the PLL enabled even when @pipe is not going to
  6260. * be enabled.
  6261. */
  6262. int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
  6263. const struct dpll *dpll)
  6264. {
  6265. struct intel_crtc *crtc =
  6266. to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
  6267. struct intel_crtc_state *pipe_config;
  6268. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  6269. if (!pipe_config)
  6270. return -ENOMEM;
  6271. pipe_config->base.crtc = &crtc->base;
  6272. pipe_config->pixel_multiplier = 1;
  6273. pipe_config->dpll = *dpll;
  6274. if (IS_CHERRYVIEW(dev)) {
  6275. chv_compute_dpll(crtc, pipe_config);
  6276. chv_prepare_pll(crtc, pipe_config);
  6277. chv_enable_pll(crtc, pipe_config);
  6278. } else {
  6279. vlv_compute_dpll(crtc, pipe_config);
  6280. vlv_prepare_pll(crtc, pipe_config);
  6281. vlv_enable_pll(crtc, pipe_config);
  6282. }
  6283. kfree(pipe_config);
  6284. return 0;
  6285. }
  6286. /**
  6287. * vlv_force_pll_off - forcibly disable just the PLL
  6288. * @dev_priv: i915 private structure
  6289. * @pipe: pipe PLL to disable
  6290. *
  6291. * Disable the PLL for @pipe. To be used in cases where we need
  6292. * the PLL enabled even when @pipe is not going to be enabled.
  6293. */
  6294. void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
  6295. {
  6296. if (IS_CHERRYVIEW(dev))
  6297. chv_disable_pll(to_i915(dev), pipe);
  6298. else
  6299. vlv_disable_pll(to_i915(dev), pipe);
  6300. }
  6301. static void i9xx_compute_dpll(struct intel_crtc *crtc,
  6302. struct intel_crtc_state *crtc_state,
  6303. intel_clock_t *reduced_clock)
  6304. {
  6305. struct drm_device *dev = crtc->base.dev;
  6306. struct drm_i915_private *dev_priv = dev->dev_private;
  6307. u32 dpll;
  6308. bool is_sdvo;
  6309. struct dpll *clock = &crtc_state->dpll;
  6310. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6311. is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
  6312. intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
  6313. dpll = DPLL_VGA_MODE_DIS;
  6314. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
  6315. dpll |= DPLLB_MODE_LVDS;
  6316. else
  6317. dpll |= DPLLB_MODE_DAC_SERIAL;
  6318. if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
  6319. dpll |= (crtc_state->pixel_multiplier - 1)
  6320. << SDVO_MULTIPLIER_SHIFT_HIRES;
  6321. }
  6322. if (is_sdvo)
  6323. dpll |= DPLL_SDVO_HIGH_SPEED;
  6324. if (crtc_state->has_dp_encoder)
  6325. dpll |= DPLL_SDVO_HIGH_SPEED;
  6326. /* compute bitmask from p1 value */
  6327. if (IS_PINEVIEW(dev))
  6328. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
  6329. else {
  6330. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6331. if (IS_G4X(dev) && reduced_clock)
  6332. dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  6333. }
  6334. switch (clock->p2) {
  6335. case 5:
  6336. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  6337. break;
  6338. case 7:
  6339. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  6340. break;
  6341. case 10:
  6342. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  6343. break;
  6344. case 14:
  6345. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  6346. break;
  6347. }
  6348. if (INTEL_INFO(dev)->gen >= 4)
  6349. dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
  6350. if (crtc_state->sdvo_tv_clock)
  6351. dpll |= PLL_REF_INPUT_TVCLKINBC;
  6352. else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6353. intel_panel_use_ssc(dev_priv))
  6354. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6355. else
  6356. dpll |= PLL_REF_INPUT_DREFCLK;
  6357. dpll |= DPLL_VCO_ENABLE;
  6358. crtc_state->dpll_hw_state.dpll = dpll;
  6359. if (INTEL_INFO(dev)->gen >= 4) {
  6360. u32 dpll_md = (crtc_state->pixel_multiplier - 1)
  6361. << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6362. crtc_state->dpll_hw_state.dpll_md = dpll_md;
  6363. }
  6364. }
  6365. static void i8xx_compute_dpll(struct intel_crtc *crtc,
  6366. struct intel_crtc_state *crtc_state,
  6367. intel_clock_t *reduced_clock)
  6368. {
  6369. struct drm_device *dev = crtc->base.dev;
  6370. struct drm_i915_private *dev_priv = dev->dev_private;
  6371. u32 dpll;
  6372. struct dpll *clock = &crtc_state->dpll;
  6373. i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
  6374. dpll = DPLL_VGA_MODE_DIS;
  6375. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6376. dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6377. } else {
  6378. if (clock->p1 == 2)
  6379. dpll |= PLL_P1_DIVIDE_BY_TWO;
  6380. else
  6381. dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  6382. if (clock->p2 == 4)
  6383. dpll |= PLL_P2_DIVIDE_BY_4;
  6384. }
  6385. if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
  6386. dpll |= DPLL_DVO_2X_MODE;
  6387. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  6388. intel_panel_use_ssc(dev_priv))
  6389. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  6390. else
  6391. dpll |= PLL_REF_INPUT_DREFCLK;
  6392. dpll |= DPLL_VCO_ENABLE;
  6393. crtc_state->dpll_hw_state.dpll = dpll;
  6394. }
  6395. static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
  6396. {
  6397. struct drm_device *dev = intel_crtc->base.dev;
  6398. struct drm_i915_private *dev_priv = dev->dev_private;
  6399. enum pipe pipe = intel_crtc->pipe;
  6400. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  6401. const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
  6402. uint32_t crtc_vtotal, crtc_vblank_end;
  6403. int vsyncshift = 0;
6404. /* We need to be careful not to change the adjusted mode, for otherwise
  6405. * the hw state checker will get angry at the mismatch. */
  6406. crtc_vtotal = adjusted_mode->crtc_vtotal;
  6407. crtc_vblank_end = adjusted_mode->crtc_vblank_end;
  6408. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
  6409. /* the chip adds 2 halflines automatically */
  6410. crtc_vtotal -= 1;
  6411. crtc_vblank_end -= 1;
  6412. if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
  6413. vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
  6414. else
  6415. vsyncshift = adjusted_mode->crtc_hsync_start -
  6416. adjusted_mode->crtc_htotal / 2;
  6417. if (vsyncshift < 0)
  6418. vsyncshift += adjusted_mode->crtc_htotal;
  6419. }
  6420. if (INTEL_INFO(dev)->gen > 3)
  6421. I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
  6422. I915_WRITE(HTOTAL(cpu_transcoder),
  6423. (adjusted_mode->crtc_hdisplay - 1) |
  6424. ((adjusted_mode->crtc_htotal - 1) << 16));
  6425. I915_WRITE(HBLANK(cpu_transcoder),
  6426. (adjusted_mode->crtc_hblank_start - 1) |
  6427. ((adjusted_mode->crtc_hblank_end - 1) << 16));
  6428. I915_WRITE(HSYNC(cpu_transcoder),
  6429. (adjusted_mode->crtc_hsync_start - 1) |
  6430. ((adjusted_mode->crtc_hsync_end - 1) << 16));
  6431. I915_WRITE(VTOTAL(cpu_transcoder),
  6432. (adjusted_mode->crtc_vdisplay - 1) |
  6433. ((crtc_vtotal - 1) << 16));
  6434. I915_WRITE(VBLANK(cpu_transcoder),
  6435. (adjusted_mode->crtc_vblank_start - 1) |
  6436. ((crtc_vblank_end - 1) << 16));
  6437. I915_WRITE(VSYNC(cpu_transcoder),
  6438. (adjusted_mode->crtc_vsync_start - 1) |
  6439. ((adjusted_mode->crtc_vsync_end - 1) << 16));
  6440. /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
  6441. * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
  6442. * documented on the DDI_FUNC_CTL register description, EDP Input Select
  6443. * bits. */
  6444. if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
  6445. (pipe == PIPE_B || pipe == PIPE_C))
  6446. I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
  6447. }
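/*
 * Register packing example for the writes above (illustrative timing): a
 * 1920-wide mode with htotal 2200 programs HTOTAL with
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f, i.e. active size minus one
 * in the low half and total size minus one in the high half.  The blank,
 * sync and vertical registers all use the same "value - 1" packing.
 */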
  6448. static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
  6449. {
  6450. struct drm_device *dev = intel_crtc->base.dev;
  6451. struct drm_i915_private *dev_priv = dev->dev_private;
  6452. enum pipe pipe = intel_crtc->pipe;
  6453. /* pipesrc controls the size that is scaled from, which should
  6454. * always be the user's requested size.
  6455. */
  6456. I915_WRITE(PIPESRC(pipe),
  6457. ((intel_crtc->config->pipe_src_w - 1) << 16) |
  6458. (intel_crtc->config->pipe_src_h - 1));
  6459. }
  6460. static void intel_get_pipe_timings(struct intel_crtc *crtc,
  6461. struct intel_crtc_state *pipe_config)
  6462. {
  6463. struct drm_device *dev = crtc->base.dev;
  6464. struct drm_i915_private *dev_priv = dev->dev_private;
  6465. enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
  6466. uint32_t tmp;
  6467. tmp = I915_READ(HTOTAL(cpu_transcoder));
  6468. pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
  6469. pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
  6470. tmp = I915_READ(HBLANK(cpu_transcoder));
  6471. pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
  6472. pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
  6473. tmp = I915_READ(HSYNC(cpu_transcoder));
  6474. pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
  6475. pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
  6476. tmp = I915_READ(VTOTAL(cpu_transcoder));
  6477. pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
  6478. pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
  6479. tmp = I915_READ(VBLANK(cpu_transcoder));
  6480. pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
  6481. pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
  6482. tmp = I915_READ(VSYNC(cpu_transcoder));
  6483. pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
  6484. pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
  6485. if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
  6486. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
  6487. pipe_config->base.adjusted_mode.crtc_vtotal += 1;
  6488. pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
  6489. }
  6490. }
  6491. static void intel_get_pipe_src_size(struct intel_crtc *crtc,
  6492. struct intel_crtc_state *pipe_config)
  6493. {
  6494. struct drm_device *dev = crtc->base.dev;
  6495. struct drm_i915_private *dev_priv = dev->dev_private;
  6496. u32 tmp;
  6497. tmp = I915_READ(PIPESRC(crtc->pipe));
  6498. pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
  6499. pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
  6500. pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
  6501. pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
  6502. }
  6503. void intel_mode_from_pipe_config(struct drm_display_mode *mode,
  6504. struct intel_crtc_state *pipe_config)
  6505. {
  6506. mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
  6507. mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
  6508. mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
  6509. mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
  6510. mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
  6511. mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
  6512. mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
  6513. mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
  6514. mode->flags = pipe_config->base.adjusted_mode.flags;
  6515. mode->type = DRM_MODE_TYPE_DRIVER;
  6516. mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
  6517. mode->flags |= pipe_config->base.adjusted_mode.flags;
  6518. mode->hsync = drm_mode_hsync(mode);
  6519. mode->vrefresh = drm_mode_vrefresh(mode);
  6520. drm_mode_set_name(mode);
  6521. }
  6522. static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
  6523. {
  6524. struct drm_device *dev = intel_crtc->base.dev;
  6525. struct drm_i915_private *dev_priv = dev->dev_private;
  6526. uint32_t pipeconf;
  6527. pipeconf = 0;
  6528. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  6529. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  6530. pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
  6531. if (intel_crtc->config->double_wide)
  6532. pipeconf |= PIPECONF_DOUBLE_WIDE;
  6533. /* only g4x and later have fancy bpc/dither controls */
  6534. if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  6535. /* Bspec claims that we can't use dithering for 30bpp pipes. */
  6536. if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
  6537. pipeconf |= PIPECONF_DITHER_EN |
  6538. PIPECONF_DITHER_TYPE_SP;
  6539. switch (intel_crtc->config->pipe_bpp) {
  6540. case 18:
  6541. pipeconf |= PIPECONF_6BPC;
  6542. break;
  6543. case 24:
  6544. pipeconf |= PIPECONF_8BPC;
  6545. break;
  6546. case 30:
  6547. pipeconf |= PIPECONF_10BPC;
  6548. break;
  6549. default:
  6550. /* Case prevented by intel_choose_pipe_bpp_dither. */
  6551. BUG();
  6552. }
  6553. }
  6554. if (HAS_PIPE_CXSR(dev)) {
  6555. if (intel_crtc->lowfreq_avail) {
  6556. DRM_DEBUG_KMS("enabling CxSR downclocking\n");
  6557. pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
  6558. } else {
  6559. DRM_DEBUG_KMS("disabling CxSR downclocking\n");
  6560. }
  6561. }
  6562. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
  6563. if (INTEL_INFO(dev)->gen < 4 ||
  6564. intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
  6565. pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
  6566. else
  6567. pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
  6568. } else
  6569. pipeconf |= PIPECONF_PROGRESSIVE;
  6570. if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
  6571. intel_crtc->config->limited_color_range)
  6572. pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
  6573. I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
  6574. POSTING_READ(PIPECONF(intel_crtc->pipe));
  6575. }
  6576. static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
  6577. struct intel_crtc_state *crtc_state)
  6578. {
  6579. struct drm_device *dev = crtc->base.dev;
  6580. struct drm_i915_private *dev_priv = dev->dev_private;
  6581. const intel_limit_t *limit;
  6582. int refclk = 48000;
  6583. memset(&crtc_state->dpll_hw_state, 0,
  6584. sizeof(crtc_state->dpll_hw_state));
  6585. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6586. if (intel_panel_use_ssc(dev_priv)) {
  6587. refclk = dev_priv->vbt.lvds_ssc_freq;
  6588. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  6589. }
  6590. limit = &intel_limits_i8xx_lvds;
  6591. } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
  6592. limit = &intel_limits_i8xx_dvo;
  6593. } else {
  6594. limit = &intel_limits_i8xx_dac;
  6595. }
  6596. if (!crtc_state->clock_set &&
  6597. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6598. refclk, NULL, &crtc_state->dpll)) {
  6599. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6600. return -EINVAL;
  6601. }
  6602. i8xx_compute_dpll(crtc, crtc_state, NULL);
  6603. return 0;
  6604. }
  6605. static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
  6606. struct intel_crtc_state *crtc_state)
  6607. {
  6608. struct drm_device *dev = crtc->base.dev;
  6609. struct drm_i915_private *dev_priv = dev->dev_private;
  6610. const intel_limit_t *limit;
  6611. int refclk = 96000;
  6612. memset(&crtc_state->dpll_hw_state, 0,
  6613. sizeof(crtc_state->dpll_hw_state));
  6614. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6615. if (intel_panel_use_ssc(dev_priv)) {
  6616. refclk = dev_priv->vbt.lvds_ssc_freq;
  6617. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  6618. }
  6619. if (intel_is_dual_link_lvds(dev))
  6620. limit = &intel_limits_g4x_dual_channel_lvds;
  6621. else
  6622. limit = &intel_limits_g4x_single_channel_lvds;
  6623. } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
  6624. intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
  6625. limit = &intel_limits_g4x_hdmi;
  6626. } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
  6627. limit = &intel_limits_g4x_sdvo;
  6628. } else {
  6629. /* The option is for other outputs */
  6630. limit = &intel_limits_i9xx_sdvo;
  6631. }
  6632. if (!crtc_state->clock_set &&
  6633. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6634. refclk, NULL, &crtc_state->dpll)) {
  6635. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6636. return -EINVAL;
  6637. }
  6638. i9xx_compute_dpll(crtc, crtc_state, NULL);
  6639. return 0;
  6640. }
  6641. static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
  6642. struct intel_crtc_state *crtc_state)
  6643. {
  6644. struct drm_device *dev = crtc->base.dev;
  6645. struct drm_i915_private *dev_priv = dev->dev_private;
  6646. const intel_limit_t *limit;
  6647. int refclk = 96000;
  6648. memset(&crtc_state->dpll_hw_state, 0,
  6649. sizeof(crtc_state->dpll_hw_state));
  6650. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6651. if (intel_panel_use_ssc(dev_priv)) {
  6652. refclk = dev_priv->vbt.lvds_ssc_freq;
  6653. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  6654. }
  6655. limit = &intel_limits_pineview_lvds;
  6656. } else {
  6657. limit = &intel_limits_pineview_sdvo;
  6658. }
  6659. if (!crtc_state->clock_set &&
  6660. !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6661. refclk, NULL, &crtc_state->dpll)) {
  6662. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6663. return -EINVAL;
  6664. }
  6665. i9xx_compute_dpll(crtc, crtc_state, NULL);
  6666. return 0;
  6667. }
  6668. static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
  6669. struct intel_crtc_state *crtc_state)
  6670. {
  6671. struct drm_device *dev = crtc->base.dev;
  6672. struct drm_i915_private *dev_priv = dev->dev_private;
  6673. const intel_limit_t *limit;
  6674. int refclk = 96000;
  6675. memset(&crtc_state->dpll_hw_state, 0,
  6676. sizeof(crtc_state->dpll_hw_state));
  6677. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  6678. if (intel_panel_use_ssc(dev_priv)) {
  6679. refclk = dev_priv->vbt.lvds_ssc_freq;
  6680. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
  6681. }
  6682. limit = &intel_limits_i9xx_lvds;
  6683. } else {
  6684. limit = &intel_limits_i9xx_sdvo;
  6685. }
  6686. if (!crtc_state->clock_set &&
  6687. !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6688. refclk, NULL, &crtc_state->dpll)) {
  6689. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6690. return -EINVAL;
  6691. }
  6692. i9xx_compute_dpll(crtc, crtc_state, NULL);
  6693. return 0;
  6694. }
  6695. static int chv_crtc_compute_clock(struct intel_crtc *crtc,
  6696. struct intel_crtc_state *crtc_state)
  6697. {
  6698. int refclk = 100000;
  6699. const intel_limit_t *limit = &intel_limits_chv;
  6700. memset(&crtc_state->dpll_hw_state, 0,
  6701. sizeof(crtc_state->dpll_hw_state));
  6702. if (!crtc_state->clock_set &&
  6703. !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6704. refclk, NULL, &crtc_state->dpll)) {
  6705. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6706. return -EINVAL;
  6707. }
  6708. chv_compute_dpll(crtc, crtc_state);
  6709. return 0;
  6710. }
  6711. static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
  6712. struct intel_crtc_state *crtc_state)
  6713. {
  6714. int refclk = 100000;
  6715. const intel_limit_t *limit = &intel_limits_vlv;
  6716. memset(&crtc_state->dpll_hw_state, 0,
  6717. sizeof(crtc_state->dpll_hw_state));
  6718. if (!crtc_state->clock_set &&
  6719. !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  6720. refclk, NULL, &crtc_state->dpll)) {
  6721. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  6722. return -EINVAL;
  6723. }
  6724. vlv_compute_dpll(crtc, crtc_state);
  6725. return 0;
  6726. }
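/*
 * The compute_clock variants above differ mainly in the reference clock and
 * limit tables fed to the DPLL search: 48000 kHz for i8xx, 96000 kHz for
 * i9xx/g4x/Pineview (replaced by the VBT SSC frequency for LVDS panels when
 * SSC is in use), and a fixed 100000 kHz for Valleyview/Cherryview.  All of
 * them skip the search when the encoder has already filled in clock_set.
 */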
  6727. static void i9xx_get_pfit_config(struct intel_crtc *crtc,
  6728. struct intel_crtc_state *pipe_config)
  6729. {
  6730. struct drm_device *dev = crtc->base.dev;
  6731. struct drm_i915_private *dev_priv = dev->dev_private;
  6732. uint32_t tmp;
  6733. if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
  6734. return;
  6735. tmp = I915_READ(PFIT_CONTROL);
  6736. if (!(tmp & PFIT_ENABLE))
  6737. return;
  6738. /* Check whether the pfit is attached to our pipe. */
  6739. if (INTEL_INFO(dev)->gen < 4) {
  6740. if (crtc->pipe != PIPE_B)
  6741. return;
  6742. } else {
  6743. if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
  6744. return;
  6745. }
  6746. pipe_config->gmch_pfit.control = tmp;
  6747. pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
  6748. }
  6749. static void vlv_crtc_clock_get(struct intel_crtc *crtc,
  6750. struct intel_crtc_state *pipe_config)
  6751. {
  6752. struct drm_device *dev = crtc->base.dev;
  6753. struct drm_i915_private *dev_priv = dev->dev_private;
  6754. int pipe = pipe_config->cpu_transcoder;
  6755. intel_clock_t clock;
  6756. u32 mdiv;
  6757. int refclk = 100000;
  6758. /* In case of DSI, DPLL will not be used */
  6759. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6760. return;
  6761. mutex_lock(&dev_priv->sb_lock);
  6762. mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
  6763. mutex_unlock(&dev_priv->sb_lock);
  6764. clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
  6765. clock.m2 = mdiv & DPIO_M2DIV_MASK;
  6766. clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
  6767. clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
  6768. clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
  6769. pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
  6770. }
  6771. static void
  6772. i9xx_get_initial_plane_config(struct intel_crtc *crtc,
  6773. struct intel_initial_plane_config *plane_config)
  6774. {
  6775. struct drm_device *dev = crtc->base.dev;
  6776. struct drm_i915_private *dev_priv = dev->dev_private;
  6777. u32 val, base, offset;
  6778. int pipe = crtc->pipe, plane = crtc->plane;
  6779. int fourcc, pixel_format;
  6780. unsigned int aligned_height;
  6781. struct drm_framebuffer *fb;
  6782. struct intel_framebuffer *intel_fb;
  6783. val = I915_READ(DSPCNTR(plane));
  6784. if (!(val & DISPLAY_PLANE_ENABLE))
  6785. return;
  6786. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  6787. if (!intel_fb) {
  6788. DRM_DEBUG_KMS("failed to alloc fb\n");
  6789. return;
  6790. }
  6791. fb = &intel_fb->base;
  6792. if (INTEL_INFO(dev)->gen >= 4) {
  6793. if (val & DISPPLANE_TILED) {
  6794. plane_config->tiling = I915_TILING_X;
  6795. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  6796. }
  6797. }
  6798. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  6799. fourcc = i9xx_format_to_fourcc(pixel_format);
  6800. fb->pixel_format = fourcc;
  6801. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  6802. if (INTEL_INFO(dev)->gen >= 4) {
  6803. if (plane_config->tiling)
  6804. offset = I915_READ(DSPTILEOFF(plane));
  6805. else
  6806. offset = I915_READ(DSPLINOFF(plane));
  6807. base = I915_READ(DSPSURF(plane)) & 0xfffff000;
  6808. } else {
  6809. base = I915_READ(DSPADDR(plane));
  6810. }
  6811. plane_config->base = base;
  6812. val = I915_READ(PIPESRC(pipe));
  6813. fb->width = ((val >> 16) & 0xfff) + 1;
  6814. fb->height = ((val >> 0) & 0xfff) + 1;
  6815. val = I915_READ(DSPSTRIDE(pipe));
  6816. fb->pitches[0] = val & 0xffffffc0;
  6817. aligned_height = intel_fb_align_height(dev, fb->height,
  6818. fb->pixel_format,
  6819. fb->modifier[0]);
  6820. plane_config->size = fb->pitches[0] * aligned_height;
  6821. DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  6822. pipe_name(pipe), plane, fb->width, fb->height,
  6823. fb->bits_per_pixel, base, fb->pitches[0],
  6824. plane_config->size);
  6825. plane_config->fb = intel_fb;
  6826. }
  6827. static void chv_crtc_clock_get(struct intel_crtc *crtc,
  6828. struct intel_crtc_state *pipe_config)
  6829. {
  6830. struct drm_device *dev = crtc->base.dev;
  6831. struct drm_i915_private *dev_priv = dev->dev_private;
  6832. int pipe = pipe_config->cpu_transcoder;
  6833. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  6834. intel_clock_t clock;
  6835. u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
  6836. int refclk = 100000;
  6837. /* In case of DSI, DPLL will not be used */
  6838. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6839. return;
  6840. mutex_lock(&dev_priv->sb_lock);
  6841. cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
  6842. pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
  6843. pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
  6844. pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
  6845. pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  6846. mutex_unlock(&dev_priv->sb_lock);
  6847. clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
  6848. clock.m2 = (pll_dw0 & 0xff) << 22;
  6849. if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
  6850. clock.m2 |= pll_dw2 & 0x3fffff;
  6851. clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
  6852. clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
  6853. clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
  6854. pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
  6855. }
  6856. static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
  6857. struct intel_crtc_state *pipe_config)
  6858. {
  6859. struct drm_device *dev = crtc->base.dev;
  6860. struct drm_i915_private *dev_priv = dev->dev_private;
  6861. enum intel_display_power_domain power_domain;
  6862. uint32_t tmp;
  6863. bool ret;
  6864. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  6865. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  6866. return false;
  6867. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  6868. pipe_config->shared_dpll = NULL;
  6869. ret = false;
  6870. tmp = I915_READ(PIPECONF(crtc->pipe));
  6871. if (!(tmp & PIPECONF_ENABLE))
  6872. goto out;
  6873. if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  6874. switch (tmp & PIPECONF_BPC_MASK) {
  6875. case PIPECONF_6BPC:
  6876. pipe_config->pipe_bpp = 18;
  6877. break;
  6878. case PIPECONF_8BPC:
  6879. pipe_config->pipe_bpp = 24;
  6880. break;
  6881. case PIPECONF_10BPC:
  6882. pipe_config->pipe_bpp = 30;
  6883. break;
  6884. default:
  6885. break;
  6886. }
  6887. }
  6888. if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
  6889. (tmp & PIPECONF_COLOR_RANGE_SELECT))
  6890. pipe_config->limited_color_range = true;
  6891. if (INTEL_INFO(dev)->gen < 4)
  6892. pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
  6893. intel_get_pipe_timings(crtc, pipe_config);
  6894. intel_get_pipe_src_size(crtc, pipe_config);
  6895. i9xx_get_pfit_config(crtc, pipe_config);
  6896. if (INTEL_INFO(dev)->gen >= 4) {
  6897. /* No way to read it out on pipes B and C */
  6898. if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
  6899. tmp = dev_priv->chv_dpll_md[crtc->pipe];
  6900. else
  6901. tmp = I915_READ(DPLL_MD(crtc->pipe));
  6902. pipe_config->pixel_multiplier =
  6903. ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
  6904. >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
  6905. pipe_config->dpll_hw_state.dpll_md = tmp;
  6906. } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
  6907. tmp = I915_READ(DPLL(crtc->pipe));
  6908. pipe_config->pixel_multiplier =
  6909. ((tmp & SDVO_MULTIPLIER_MASK)
  6910. >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
  6911. } else {
  6912. /* Note that on i915G/GM the pixel multiplier is in the sdvo
  6913. * port and will be fixed up in the encoder->get_config
  6914. * function. */
  6915. pipe_config->pixel_multiplier = 1;
  6916. }
  6917. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
  6918. if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
  6919. /*
  6920. * DPLL_DVO_2X_MODE must be enabled for both DPLLs
  6921. * on 830. Filter it out here so that we don't
  6922. * report errors due to that.
  6923. */
  6924. if (IS_I830(dev))
  6925. pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
  6926. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
  6927. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
  6928. } else {
  6929. /* Mask out read-only status bits. */
  6930. pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
  6931. DPLL_PORTC_READY_MASK |
  6932. DPLL_PORTB_READY_MASK);
  6933. }
  6934. if (IS_CHERRYVIEW(dev))
  6935. chv_crtc_clock_get(crtc, pipe_config);
  6936. else if (IS_VALLEYVIEW(dev))
  6937. vlv_crtc_clock_get(crtc, pipe_config);
  6938. else
  6939. i9xx_crtc_clock_get(crtc, pipe_config);
  6940. /*
  6941. * Normally the dotclock is filled in by the encoder .get_config()
  6942. * but in case the pipe is enabled w/o any ports we need a sane
  6943. * default.
  6944. */
  6945. pipe_config->base.adjusted_mode.crtc_clock =
  6946. pipe_config->port_clock / pipe_config->pixel_multiplier;
  6947. ret = true;
  6948. out:
  6949. intel_display_power_put(dev_priv, power_domain);
  6950. return ret;
  6951. }
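/*
 * Illustrative numbers only (not taken from any particular platform): if the
 * DPLL readout above yields a port_clock of 200000 kHz and a pixel_multiplier
 * of 2, the fallback computes a crtc_clock of 100000 kHz; an attached
 * encoder's .get_config() would normally overwrite this with the real
 * dotclock.
 */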
  6952. static void ironlake_init_pch_refclk(struct drm_device *dev)
  6953. {
  6954. struct drm_i915_private *dev_priv = dev->dev_private;
  6955. struct intel_encoder *encoder;
  6956. int i;
  6957. u32 val, final;
  6958. bool has_lvds = false;
  6959. bool has_cpu_edp = false;
  6960. bool has_panel = false;
  6961. bool has_ck505 = false;
  6962. bool can_ssc = false;
  6963. bool using_ssc_source = false;
  6964. /* We need to take the global config into account */
  6965. for_each_intel_encoder(dev, encoder) {
  6966. switch (encoder->type) {
  6967. case INTEL_OUTPUT_LVDS:
  6968. has_panel = true;
  6969. has_lvds = true;
  6970. break;
  6971. case INTEL_OUTPUT_EDP:
  6972. has_panel = true;
  6973. if (enc_to_dig_port(&encoder->base)->port == PORT_A)
  6974. has_cpu_edp = true;
  6975. break;
  6976. default:
  6977. break;
  6978. }
  6979. }
  6980. if (HAS_PCH_IBX(dev)) {
  6981. has_ck505 = dev_priv->vbt.display_clock_mode;
  6982. can_ssc = has_ck505;
  6983. } else {
  6984. has_ck505 = false;
  6985. can_ssc = true;
  6986. }
  6987. /* Check if any DPLLs are using the SSC source */
  6988. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  6989. u32 temp = I915_READ(PCH_DPLL(i));
  6990. if (!(temp & DPLL_VCO_ENABLE))
  6991. continue;
  6992. if ((temp & PLL_REF_INPUT_MASK) ==
  6993. PLLB_REF_INPUT_SPREADSPECTRUMIN) {
  6994. using_ssc_source = true;
  6995. break;
  6996. }
  6997. }
  6998. DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
  6999. has_panel, has_lvds, has_ck505, using_ssc_source);
7000. /* Ironlake: try to set up the display reference clock before DPLL
7001. * enabling. This is only under the driver's control after the
7002. * PCH B stepping; previous chipset steppings should ignore
7003. * this setting.
  7004. */
  7005. val = I915_READ(PCH_DREF_CONTROL);
  7006. /* As we must carefully and slowly disable/enable each source in turn,
  7007. * compute the final state we want first and check if we need to
  7008. * make any changes at all.
  7009. */
  7010. final = val;
  7011. final &= ~DREF_NONSPREAD_SOURCE_MASK;
  7012. if (has_ck505)
  7013. final |= DREF_NONSPREAD_CK505_ENABLE;
  7014. else
  7015. final |= DREF_NONSPREAD_SOURCE_ENABLE;
  7016. final &= ~DREF_SSC_SOURCE_MASK;
  7017. final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7018. final &= ~DREF_SSC1_ENABLE;
  7019. if (has_panel) {
  7020. final |= DREF_SSC_SOURCE_ENABLE;
  7021. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7022. final |= DREF_SSC1_ENABLE;
  7023. if (has_cpu_edp) {
  7024. if (intel_panel_use_ssc(dev_priv) && can_ssc)
  7025. final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7026. else
  7027. final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7028. } else
  7029. final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7030. } else if (using_ssc_source) {
  7031. final |= DREF_SSC_SOURCE_ENABLE;
  7032. final |= DREF_SSC1_ENABLE;
  7033. }
  7034. if (final == val)
  7035. return;
  7036. /* Always enable nonspread source */
  7037. val &= ~DREF_NONSPREAD_SOURCE_MASK;
  7038. if (has_ck505)
  7039. val |= DREF_NONSPREAD_CK505_ENABLE;
  7040. else
  7041. val |= DREF_NONSPREAD_SOURCE_ENABLE;
  7042. if (has_panel) {
  7043. val &= ~DREF_SSC_SOURCE_MASK;
  7044. val |= DREF_SSC_SOURCE_ENABLE;
  7045. /* SSC must be turned on before enabling the CPU output */
  7046. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7047. DRM_DEBUG_KMS("Using SSC on panel\n");
  7048. val |= DREF_SSC1_ENABLE;
  7049. } else
  7050. val &= ~DREF_SSC1_ENABLE;
  7051. /* Get SSC going before enabling the outputs */
  7052. I915_WRITE(PCH_DREF_CONTROL, val);
  7053. POSTING_READ(PCH_DREF_CONTROL);
  7054. udelay(200);
  7055. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7056. /* Enable CPU source on CPU attached eDP */
  7057. if (has_cpu_edp) {
  7058. if (intel_panel_use_ssc(dev_priv) && can_ssc) {
  7059. DRM_DEBUG_KMS("Using SSC on eDP\n");
  7060. val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
  7061. } else
  7062. val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
  7063. } else
  7064. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7065. I915_WRITE(PCH_DREF_CONTROL, val);
  7066. POSTING_READ(PCH_DREF_CONTROL);
  7067. udelay(200);
  7068. } else {
  7069. DRM_DEBUG_KMS("Disabling CPU source output\n");
  7070. val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
  7071. /* Turn off CPU output */
  7072. val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
  7073. I915_WRITE(PCH_DREF_CONTROL, val);
  7074. POSTING_READ(PCH_DREF_CONTROL);
  7075. udelay(200);
  7076. if (!using_ssc_source) {
  7077. DRM_DEBUG_KMS("Disabling SSC source\n");
  7078. /* Turn off the SSC source */
  7079. val &= ~DREF_SSC_SOURCE_MASK;
  7080. val |= DREF_SSC_SOURCE_DISABLE;
  7081. /* Turn off SSC1 */
  7082. val &= ~DREF_SSC1_ENABLE;
  7083. I915_WRITE(PCH_DREF_CONTROL, val);
  7084. POSTING_READ(PCH_DREF_CONTROL);
  7085. udelay(200);
  7086. }
  7087. }
  7088. BUG_ON(val != final);
  7089. }
  7090. static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
  7091. {
  7092. uint32_t tmp;
  7093. tmp = I915_READ(SOUTH_CHICKEN2);
  7094. tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
  7095. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7096. if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
  7097. FDI_MPHY_IOSFSB_RESET_STATUS, 100))
  7098. DRM_ERROR("FDI mPHY reset assert timeout\n");
  7099. tmp = I915_READ(SOUTH_CHICKEN2);
  7100. tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
  7101. I915_WRITE(SOUTH_CHICKEN2, tmp);
  7102. if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
  7103. FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
  7104. DRM_ERROR("FDI mPHY reset de-assert timeout\n");
  7105. }
  7106. /* WaMPhyProgramming:hsw */
  7107. static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
  7108. {
  7109. uint32_t tmp;
  7110. tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
  7111. tmp &= ~(0xFF << 24);
  7112. tmp |= (0x12 << 24);
  7113. intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
  7114. tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
  7115. tmp |= (1 << 11);
  7116. intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
  7117. tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
  7118. tmp |= (1 << 11);
  7119. intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
  7120. tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
  7121. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7122. intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
  7123. tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
  7124. tmp |= (1 << 24) | (1 << 21) | (1 << 18);
  7125. intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
  7126. tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
  7127. tmp &= ~(7 << 13);
  7128. tmp |= (5 << 13);
  7129. intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
  7130. tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
  7131. tmp &= ~(7 << 13);
  7132. tmp |= (5 << 13);
  7133. intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
  7134. tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
  7135. tmp &= ~0xFF;
  7136. tmp |= 0x1C;
  7137. intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
  7138. tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
  7139. tmp &= ~0xFF;
  7140. tmp |= 0x1C;
  7141. intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
  7142. tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
  7143. tmp &= ~(0xFF << 16);
  7144. tmp |= (0x1C << 16);
  7145. intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
  7146. tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
  7147. tmp &= ~(0xFF << 16);
  7148. tmp |= (0x1C << 16);
  7149. intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
  7150. tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
  7151. tmp |= (1 << 27);
  7152. intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
  7153. tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
  7154. tmp |= (1 << 27);
  7155. intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
  7156. tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
  7157. tmp &= ~(0xF << 28);
  7158. tmp |= (4 << 28);
  7159. intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
  7160. tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
  7161. tmp &= ~(0xF << 28);
  7162. tmp |= (4 << 28);
  7163. intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
  7164. }
  7165. /* Implements 3 different sequences from BSpec chapter "Display iCLK
  7166. * Programming" based on the parameters passed:
  7167. * - Sequence to enable CLKOUT_DP
  7168. * - Sequence to enable CLKOUT_DP without spread
  7169. * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
  7170. */
  7171. static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
  7172. bool with_fdi)
  7173. {
  7174. struct drm_i915_private *dev_priv = dev->dev_private;
  7175. uint32_t reg, tmp;
  7176. if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
  7177. with_spread = true;
  7178. if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
  7179. with_fdi = false;
  7180. mutex_lock(&dev_priv->sb_lock);
  7181. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7182. tmp &= ~SBI_SSCCTL_DISABLE;
  7183. tmp |= SBI_SSCCTL_PATHALT;
  7184. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7185. udelay(24);
  7186. if (with_spread) {
  7187. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7188. tmp &= ~SBI_SSCCTL_PATHALT;
  7189. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7190. if (with_fdi) {
  7191. lpt_reset_fdi_mphy(dev_priv);
  7192. lpt_program_fdi_mphy(dev_priv);
  7193. }
  7194. }
  7195. reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
  7196. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7197. tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7198. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7199. mutex_unlock(&dev_priv->sb_lock);
  7200. }
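/*
 * Rough mapping of the parameter combinations to the three BSpec sequences
 * named above (a sketch inferred from this function, not quoted from BSpec):
 *
 *   lpt_enable_clkout_dp(dev, true,  true);   - CLKOUT_DP for FDI + PCH FDI I/O
 *   lpt_enable_clkout_dp(dev, true,  false);  - CLKOUT_DP with spread
 *   lpt_enable_clkout_dp(dev, false, false);  - CLKOUT_DP without spread
 *
 * (with_fdi without with_spread is rejected by the WARN at the top.)
 */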
  7201. /* Sequence to disable CLKOUT_DP */
  7202. static void lpt_disable_clkout_dp(struct drm_device *dev)
  7203. {
  7204. struct drm_i915_private *dev_priv = dev->dev_private;
  7205. uint32_t reg, tmp;
  7206. mutex_lock(&dev_priv->sb_lock);
  7207. reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
  7208. tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
  7209. tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
  7210. intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
  7211. tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
  7212. if (!(tmp & SBI_SSCCTL_DISABLE)) {
  7213. if (!(tmp & SBI_SSCCTL_PATHALT)) {
  7214. tmp |= SBI_SSCCTL_PATHALT;
  7215. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7216. udelay(32);
  7217. }
  7218. tmp |= SBI_SSCCTL_DISABLE;
  7219. intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
  7220. }
  7221. mutex_unlock(&dev_priv->sb_lock);
  7222. }
  7223. #define BEND_IDX(steps) ((50 + (steps)) / 5)
  7224. static const uint16_t sscdivintphase[] = {
  7225. [BEND_IDX( 50)] = 0x3B23,
  7226. [BEND_IDX( 45)] = 0x3B23,
  7227. [BEND_IDX( 40)] = 0x3C23,
  7228. [BEND_IDX( 35)] = 0x3C23,
  7229. [BEND_IDX( 30)] = 0x3D23,
  7230. [BEND_IDX( 25)] = 0x3D23,
  7231. [BEND_IDX( 20)] = 0x3E23,
  7232. [BEND_IDX( 15)] = 0x3E23,
  7233. [BEND_IDX( 10)] = 0x3F23,
  7234. [BEND_IDX( 5)] = 0x3F23,
  7235. [BEND_IDX( 0)] = 0x0025,
  7236. [BEND_IDX( -5)] = 0x0025,
  7237. [BEND_IDX(-10)] = 0x0125,
  7238. [BEND_IDX(-15)] = 0x0125,
  7239. [BEND_IDX(-20)] = 0x0225,
  7240. [BEND_IDX(-25)] = 0x0225,
  7241. [BEND_IDX(-30)] = 0x0325,
  7242. [BEND_IDX(-35)] = 0x0325,
  7243. [BEND_IDX(-40)] = 0x0425,
  7244. [BEND_IDX(-45)] = 0x0425,
  7245. [BEND_IDX(-50)] = 0x0525,
  7246. };
  7247. /*
  7248. * Bend CLKOUT_DP
  7249. * steps -50 to 50 inclusive, in steps of 5
  7250. * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
  7251. * change in clock period = -(steps / 10) * 5.787 ps
  7252. */
  7253. static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
  7254. {
  7255. uint32_t tmp;
  7256. int idx = BEND_IDX(steps);
  7257. if (WARN_ON(steps % 5 != 0))
  7258. return;
  7259. if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
  7260. return;
  7261. mutex_lock(&dev_priv->sb_lock);
  7262. if (steps % 10 != 0)
  7263. tmp = 0xAAAAAAAB;
  7264. else
  7265. tmp = 0x00000000;
  7266. intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
  7267. tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
  7268. tmp &= 0xffff0000;
  7269. tmp |= sscdivintphase[idx];
  7270. intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
  7271. mutex_unlock(&dev_priv->sb_lock);
  7272. }
  7273. #undef BEND_IDX
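/*
 * Worked example of the bend selection above (arithmetic only, derived from
 * BEND_IDX and the sscdivintphase[] table): steps = -50 maps to index
 * (50 + -50) / 5 = 0, steps = 0 maps to index 10 (the 0x0025 "no bend"
 * entry), and steps = +50 maps to index 20. lpt_init_pch_refclk() below only
 * ever requests the neutral setting, lpt_bend_clkout_dp(dev_priv, 0).
 */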
  7274. static void lpt_init_pch_refclk(struct drm_device *dev)
  7275. {
  7276. struct intel_encoder *encoder;
  7277. bool has_vga = false;
  7278. for_each_intel_encoder(dev, encoder) {
  7279. switch (encoder->type) {
  7280. case INTEL_OUTPUT_ANALOG:
  7281. has_vga = true;
  7282. break;
  7283. default:
  7284. break;
  7285. }
  7286. }
  7287. if (has_vga) {
  7288. lpt_bend_clkout_dp(to_i915(dev), 0);
  7289. lpt_enable_clkout_dp(dev, true, true);
  7290. } else {
  7291. lpt_disable_clkout_dp(dev);
  7292. }
  7293. }
  7294. /*
  7295. * Initialize reference clocks when the driver loads
  7296. */
  7297. void intel_init_pch_refclk(struct drm_device *dev)
  7298. {
  7299. if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
  7300. ironlake_init_pch_refclk(dev);
  7301. else if (HAS_PCH_LPT(dev))
  7302. lpt_init_pch_refclk(dev);
  7303. }
  7304. static void ironlake_set_pipeconf(struct drm_crtc *crtc)
  7305. {
  7306. struct drm_i915_private *dev_priv = crtc->dev->dev_private;
  7307. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7308. int pipe = intel_crtc->pipe;
  7309. uint32_t val;
  7310. val = 0;
  7311. switch (intel_crtc->config->pipe_bpp) {
  7312. case 18:
  7313. val |= PIPECONF_6BPC;
  7314. break;
  7315. case 24:
  7316. val |= PIPECONF_8BPC;
  7317. break;
  7318. case 30:
  7319. val |= PIPECONF_10BPC;
  7320. break;
  7321. case 36:
  7322. val |= PIPECONF_12BPC;
  7323. break;
  7324. default:
  7325. /* Case prevented by intel_choose_pipe_bpp_dither. */
  7326. BUG();
  7327. }
  7328. if (intel_crtc->config->dither)
  7329. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7330. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7331. val |= PIPECONF_INTERLACED_ILK;
  7332. else
  7333. val |= PIPECONF_PROGRESSIVE;
  7334. if (intel_crtc->config->limited_color_range)
  7335. val |= PIPECONF_COLOR_RANGE_SELECT;
  7336. I915_WRITE(PIPECONF(pipe), val);
  7337. POSTING_READ(PIPECONF(pipe));
  7338. }
  7339. static void haswell_set_pipeconf(struct drm_crtc *crtc)
  7340. {
  7341. struct drm_i915_private *dev_priv = crtc->dev->dev_private;
  7342. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7343. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  7344. u32 val = 0;
  7345. if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
  7346. val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  7347. if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
  7348. val |= PIPECONF_INTERLACED_ILK;
  7349. else
  7350. val |= PIPECONF_PROGRESSIVE;
  7351. I915_WRITE(PIPECONF(cpu_transcoder), val);
  7352. POSTING_READ(PIPECONF(cpu_transcoder));
  7353. }
  7354. static void haswell_set_pipemisc(struct drm_crtc *crtc)
  7355. {
  7356. struct drm_i915_private *dev_priv = crtc->dev->dev_private;
  7357. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  7358. if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
  7359. u32 val = 0;
  7360. switch (intel_crtc->config->pipe_bpp) {
  7361. case 18:
  7362. val |= PIPEMISC_DITHER_6_BPC;
  7363. break;
  7364. case 24:
  7365. val |= PIPEMISC_DITHER_8_BPC;
  7366. break;
  7367. case 30:
  7368. val |= PIPEMISC_DITHER_10_BPC;
  7369. break;
  7370. case 36:
  7371. val |= PIPEMISC_DITHER_12_BPC;
  7372. break;
  7373. default:
  7374. /* Case prevented by pipe_config_set_bpp. */
  7375. BUG();
  7376. }
  7377. if (intel_crtc->config->dither)
  7378. val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
  7379. I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
  7380. }
  7381. }
  7382. int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
  7383. {
  7384. /*
  7385. * Account for spread spectrum to avoid
  7386. * oversubscribing the link. Max center spread
  7387. * is 2.5%; use 5% for safety's sake.
  7388. */
  7389. u32 bps = target_clock * bpp * 21 / 20;
  7390. return DIV_ROUND_UP(bps, link_bw * 8);
  7391. }
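/*
 * Worked example with made-up numbers: for a 270000 kHz target clock at
 * 24 bpp over a 270000 kHz link, bps = 270000 * 24 * 21 / 20 = 6804000, and
 * DIV_ROUND_UP(6804000, 270000 * 8) = DIV_ROUND_UP(6804000, 2160000) = 4
 * lanes, i.e. the 5% spread margin pushes the result up from the unpadded
 * 3 lanes.
 */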
  7392. static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
  7393. {
  7394. return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
  7395. }
  7396. static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
  7397. struct intel_crtc_state *crtc_state,
  7398. intel_clock_t *reduced_clock)
  7399. {
  7400. struct drm_crtc *crtc = &intel_crtc->base;
  7401. struct drm_device *dev = crtc->dev;
  7402. struct drm_i915_private *dev_priv = dev->dev_private;
  7403. struct drm_atomic_state *state = crtc_state->base.state;
  7404. struct drm_connector *connector;
  7405. struct drm_connector_state *connector_state;
  7406. struct intel_encoder *encoder;
  7407. u32 dpll, fp, fp2;
  7408. int factor, i;
  7409. bool is_lvds = false, is_sdvo = false;
  7410. for_each_connector_in_state(state, connector, connector_state, i) {
  7411. if (connector_state->crtc != crtc_state->base.crtc)
  7412. continue;
  7413. encoder = to_intel_encoder(connector_state->best_encoder);
  7414. switch (encoder->type) {
  7415. case INTEL_OUTPUT_LVDS:
  7416. is_lvds = true;
  7417. break;
  7418. case INTEL_OUTPUT_SDVO:
  7419. case INTEL_OUTPUT_HDMI:
  7420. is_sdvo = true;
  7421. break;
  7422. default:
  7423. break;
  7424. }
  7425. }
  7426. /* Enable autotuning of the PLL clock (if permissible) */
  7427. factor = 21;
  7428. if (is_lvds) {
  7429. if ((intel_panel_use_ssc(dev_priv) &&
  7430. dev_priv->vbt.lvds_ssc_freq == 100000) ||
  7431. (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
  7432. factor = 25;
  7433. } else if (crtc_state->sdvo_tv_clock)
  7434. factor = 20;
  7435. fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
  7436. if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
  7437. fp |= FP_CB_TUNE;
  7438. if (reduced_clock) {
  7439. fp2 = i9xx_dpll_compute_fp(reduced_clock);
  7440. if (reduced_clock->m < factor * reduced_clock->n)
  7441. fp2 |= FP_CB_TUNE;
  7442. } else {
  7443. fp2 = fp;
  7444. }
  7445. dpll = 0;
  7446. if (is_lvds)
  7447. dpll |= DPLLB_MODE_LVDS;
  7448. else
  7449. dpll |= DPLLB_MODE_DAC_SERIAL;
  7450. dpll |= (crtc_state->pixel_multiplier - 1)
  7451. << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
  7452. if (is_sdvo)
  7453. dpll |= DPLL_SDVO_HIGH_SPEED;
  7454. if (crtc_state->has_dp_encoder)
  7455. dpll |= DPLL_SDVO_HIGH_SPEED;
  7456. /* compute bitmask from p1 value */
  7457. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
  7458. /* also FPA1 */
  7459. dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
  7460. switch (crtc_state->dpll.p2) {
  7461. case 5:
  7462. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
  7463. break;
  7464. case 7:
  7465. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
  7466. break;
  7467. case 10:
  7468. dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
  7469. break;
  7470. case 14:
  7471. dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
  7472. break;
  7473. }
  7474. if (is_lvds && intel_panel_use_ssc(dev_priv))
  7475. dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
  7476. else
  7477. dpll |= PLL_REF_INPUT_DREFCLK;
  7478. dpll |= DPLL_VCO_ENABLE;
  7479. crtc_state->dpll_hw_state.dpll = dpll;
  7480. crtc_state->dpll_hw_state.fp0 = fp;
  7481. crtc_state->dpll_hw_state.fp1 = fp2;
  7482. }
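/*
 * Note on the P1 encoding above (values purely illustrative): the P1 post
 * divider is written as a one-hot bitmask, so e.g. p1 = 3 becomes
 * (1 << (3 - 1)) = 0x4, shifted into the register at both
 * DPLL_FPA01_P1_POST_DIV_SHIFT and DPLL_FPA1_P1_POST_DIV_SHIFT.
 */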
  7483. static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
  7484. struct intel_crtc_state *crtc_state)
  7485. {
  7486. struct drm_device *dev = crtc->base.dev;
  7487. struct drm_i915_private *dev_priv = dev->dev_private;
  7488. intel_clock_t reduced_clock;
  7489. bool has_reduced_clock = false;
  7490. struct intel_shared_dpll *pll;
  7491. const intel_limit_t *limit;
  7492. int refclk = 120000;
  7493. memset(&crtc_state->dpll_hw_state, 0,
  7494. sizeof(crtc_state->dpll_hw_state));
  7495. crtc->lowfreq_avail = false;
  7496. /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
  7497. if (!crtc_state->has_pch_encoder)
  7498. return 0;
  7499. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
  7500. if (intel_panel_use_ssc(dev_priv)) {
  7501. DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
  7502. dev_priv->vbt.lvds_ssc_freq);
  7503. refclk = dev_priv->vbt.lvds_ssc_freq;
  7504. }
  7505. if (intel_is_dual_link_lvds(dev)) {
  7506. if (refclk == 100000)
  7507. limit = &intel_limits_ironlake_dual_lvds_100m;
  7508. else
  7509. limit = &intel_limits_ironlake_dual_lvds;
  7510. } else {
  7511. if (refclk == 100000)
  7512. limit = &intel_limits_ironlake_single_lvds_100m;
  7513. else
  7514. limit = &intel_limits_ironlake_single_lvds;
  7515. }
  7516. } else {
  7517. limit = &intel_limits_ironlake_dac;
  7518. }
  7519. if (!crtc_state->clock_set &&
  7520. !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
  7521. refclk, NULL, &crtc_state->dpll)) {
  7522. DRM_ERROR("Couldn't find PLL settings for mode!\n");
  7523. return -EINVAL;
  7524. }
  7525. ironlake_compute_dpll(crtc, crtc_state,
  7526. has_reduced_clock ? &reduced_clock : NULL);
  7527. pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
  7528. if (pll == NULL) {
  7529. DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
  7530. pipe_name(crtc->pipe));
  7531. return -EINVAL;
  7532. }
  7533. if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
  7534. has_reduced_clock)
  7535. crtc->lowfreq_avail = true;
  7536. return 0;
  7537. }
  7538. static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
  7539. struct intel_link_m_n *m_n)
  7540. {
  7541. struct drm_device *dev = crtc->base.dev;
  7542. struct drm_i915_private *dev_priv = dev->dev_private;
  7543. enum pipe pipe = crtc->pipe;
  7544. m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
  7545. m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
  7546. m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
  7547. & ~TU_SIZE_MASK;
  7548. m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
  7549. m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
  7550. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  7551. }
  7552. static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
  7553. enum transcoder transcoder,
  7554. struct intel_link_m_n *m_n,
  7555. struct intel_link_m_n *m2_n2)
  7556. {
  7557. struct drm_device *dev = crtc->base.dev;
  7558. struct drm_i915_private *dev_priv = dev->dev_private;
  7559. enum pipe pipe = crtc->pipe;
  7560. if (INTEL_INFO(dev)->gen >= 5) {
  7561. m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
  7562. m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
  7563. m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
  7564. & ~TU_SIZE_MASK;
  7565. m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
  7566. m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
  7567. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7568. /* Read the M2_N2 registers only on gen < 8 (they are not
7569. * available on gen 8+) and only if DRRS is supported, so the
7570. * registers are not read unnecessarily.
  7571. */
  7572. if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
  7573. crtc->config->has_drrs) {
  7574. m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
  7575. m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
  7576. m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
  7577. & ~TU_SIZE_MASK;
  7578. m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
  7579. m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
  7580. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  7581. }
  7582. } else {
  7583. m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
  7584. m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
  7585. m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
  7586. & ~TU_SIZE_MASK;
  7587. m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
  7588. m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
  7589. & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
  7590. }
  7591. }
  7592. void intel_dp_get_m_n(struct intel_crtc *crtc,
  7593. struct intel_crtc_state *pipe_config)
  7594. {
  7595. if (pipe_config->has_pch_encoder)
  7596. intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
  7597. else
  7598. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  7599. &pipe_config->dp_m_n,
  7600. &pipe_config->dp_m2_n2);
  7601. }
  7602. static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
  7603. struct intel_crtc_state *pipe_config)
  7604. {
  7605. intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
  7606. &pipe_config->fdi_m_n, NULL);
  7607. }
  7608. static void skylake_get_pfit_config(struct intel_crtc *crtc,
  7609. struct intel_crtc_state *pipe_config)
  7610. {
  7611. struct drm_device *dev = crtc->base.dev;
  7612. struct drm_i915_private *dev_priv = dev->dev_private;
  7613. struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
  7614. uint32_t ps_ctrl = 0;
  7615. int id = -1;
  7616. int i;
  7617. /* find scaler attached to this pipe */
  7618. for (i = 0; i < crtc->num_scalers; i++) {
  7619. ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
  7620. if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
  7621. id = i;
  7622. pipe_config->pch_pfit.enabled = true;
  7623. pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
  7624. pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
  7625. break;
  7626. }
  7627. }
  7628. scaler_state->scaler_id = id;
  7629. if (id >= 0) {
  7630. scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
  7631. } else {
  7632. scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
  7633. }
  7634. }
  7635. static void
  7636. skylake_get_initial_plane_config(struct intel_crtc *crtc,
  7637. struct intel_initial_plane_config *plane_config)
  7638. {
  7639. struct drm_device *dev = crtc->base.dev;
  7640. struct drm_i915_private *dev_priv = dev->dev_private;
  7641. u32 val, base, offset, stride_mult, tiling;
  7642. int pipe = crtc->pipe;
  7643. int fourcc, pixel_format;
  7644. unsigned int aligned_height;
  7645. struct drm_framebuffer *fb;
  7646. struct intel_framebuffer *intel_fb;
  7647. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  7648. if (!intel_fb) {
  7649. DRM_DEBUG_KMS("failed to alloc fb\n");
  7650. return;
  7651. }
  7652. fb = &intel_fb->base;
  7653. val = I915_READ(PLANE_CTL(pipe, 0));
  7654. if (!(val & PLANE_CTL_ENABLE))
  7655. goto error;
  7656. pixel_format = val & PLANE_CTL_FORMAT_MASK;
  7657. fourcc = skl_format_to_fourcc(pixel_format,
  7658. val & PLANE_CTL_ORDER_RGBX,
  7659. val & PLANE_CTL_ALPHA_MASK);
  7660. fb->pixel_format = fourcc;
  7661. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  7662. tiling = val & PLANE_CTL_TILED_MASK;
  7663. switch (tiling) {
  7664. case PLANE_CTL_TILED_LINEAR:
  7665. fb->modifier[0] = DRM_FORMAT_MOD_NONE;
  7666. break;
  7667. case PLANE_CTL_TILED_X:
  7668. plane_config->tiling = I915_TILING_X;
  7669. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  7670. break;
  7671. case PLANE_CTL_TILED_Y:
  7672. fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
  7673. break;
  7674. case PLANE_CTL_TILED_YF:
  7675. fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
  7676. break;
  7677. default:
  7678. MISSING_CASE(tiling);
  7679. goto error;
  7680. }
  7681. base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
  7682. plane_config->base = base;
  7683. offset = I915_READ(PLANE_OFFSET(pipe, 0));
  7684. val = I915_READ(PLANE_SIZE(pipe, 0));
  7685. fb->height = ((val >> 16) & 0xfff) + 1;
  7686. fb->width = ((val >> 0) & 0x1fff) + 1;
  7687. val = I915_READ(PLANE_STRIDE(pipe, 0));
  7688. stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  7689. fb->pixel_format);
  7690. fb->pitches[0] = (val & 0x3ff) * stride_mult;
  7691. aligned_height = intel_fb_align_height(dev, fb->height,
  7692. fb->pixel_format,
  7693. fb->modifier[0]);
  7694. plane_config->size = fb->pitches[0] * aligned_height;
  7695. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  7696. pipe_name(pipe), fb->width, fb->height,
  7697. fb->bits_per_pixel, base, fb->pitches[0],
  7698. plane_config->size);
  7699. plane_config->fb = intel_fb;
  7700. return;
  7701. error:
  7702. kfree(fb);
  7703. }
  7704. static void ironlake_get_pfit_config(struct intel_crtc *crtc,
  7705. struct intel_crtc_state *pipe_config)
  7706. {
  7707. struct drm_device *dev = crtc->base.dev;
  7708. struct drm_i915_private *dev_priv = dev->dev_private;
  7709. uint32_t tmp;
  7710. tmp = I915_READ(PF_CTL(crtc->pipe));
  7711. if (tmp & PF_ENABLE) {
  7712. pipe_config->pch_pfit.enabled = true;
  7713. pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
  7714. pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7715. /* We currently do not free assignments of panel fitters on
  7716. * ivb/hsw (since we don't use the higher upscaling modes which
  7717. * differentiates them) so just WARN about this case for now. */
  7718. if (IS_GEN7(dev)) {
  7719. WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
  7720. PF_PIPE_SEL_IVB(crtc->pipe));
  7721. }
  7722. }
  7723. }
  7724. static void
  7725. ironlake_get_initial_plane_config(struct intel_crtc *crtc,
  7726. struct intel_initial_plane_config *plane_config)
  7727. {
  7728. struct drm_device *dev = crtc->base.dev;
  7729. struct drm_i915_private *dev_priv = dev->dev_private;
  7730. u32 val, base, offset;
  7731. int pipe = crtc->pipe;
  7732. int fourcc, pixel_format;
  7733. unsigned int aligned_height;
  7734. struct drm_framebuffer *fb;
  7735. struct intel_framebuffer *intel_fb;
  7736. val = I915_READ(DSPCNTR(pipe));
  7737. if (!(val & DISPLAY_PLANE_ENABLE))
  7738. return;
  7739. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  7740. if (!intel_fb) {
  7741. DRM_DEBUG_KMS("failed to alloc fb\n");
  7742. return;
  7743. }
  7744. fb = &intel_fb->base;
  7745. if (INTEL_INFO(dev)->gen >= 4) {
  7746. if (val & DISPPLANE_TILED) {
  7747. plane_config->tiling = I915_TILING_X;
  7748. fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
  7749. }
  7750. }
  7751. pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
  7752. fourcc = i9xx_format_to_fourcc(pixel_format);
  7753. fb->pixel_format = fourcc;
  7754. fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
  7755. base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
  7756. if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
  7757. offset = I915_READ(DSPOFFSET(pipe));
  7758. } else {
  7759. if (plane_config->tiling)
  7760. offset = I915_READ(DSPTILEOFF(pipe));
  7761. else
  7762. offset = I915_READ(DSPLINOFF(pipe));
  7763. }
  7764. plane_config->base = base;
  7765. val = I915_READ(PIPESRC(pipe));
  7766. fb->width = ((val >> 16) & 0xfff) + 1;
  7767. fb->height = ((val >> 0) & 0xfff) + 1;
  7768. val = I915_READ(DSPSTRIDE(pipe));
  7769. fb->pitches[0] = val & 0xffffffc0;
  7770. aligned_height = intel_fb_align_height(dev, fb->height,
  7771. fb->pixel_format,
  7772. fb->modifier[0]);
  7773. plane_config->size = fb->pitches[0] * aligned_height;
  7774. DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
  7775. pipe_name(pipe), fb->width, fb->height,
  7776. fb->bits_per_pixel, base, fb->pitches[0],
  7777. plane_config->size);
  7778. plane_config->fb = intel_fb;
  7779. }
  7780. static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
  7781. struct intel_crtc_state *pipe_config)
  7782. {
  7783. struct drm_device *dev = crtc->base.dev;
  7784. struct drm_i915_private *dev_priv = dev->dev_private;
  7785. enum intel_display_power_domain power_domain;
  7786. uint32_t tmp;
  7787. bool ret;
  7788. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  7789. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  7790. return false;
  7791. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  7792. pipe_config->shared_dpll = NULL;
  7793. ret = false;
  7794. tmp = I915_READ(PIPECONF(crtc->pipe));
  7795. if (!(tmp & PIPECONF_ENABLE))
  7796. goto out;
  7797. switch (tmp & PIPECONF_BPC_MASK) {
  7798. case PIPECONF_6BPC:
  7799. pipe_config->pipe_bpp = 18;
  7800. break;
  7801. case PIPECONF_8BPC:
  7802. pipe_config->pipe_bpp = 24;
  7803. break;
  7804. case PIPECONF_10BPC:
  7805. pipe_config->pipe_bpp = 30;
  7806. break;
  7807. case PIPECONF_12BPC:
  7808. pipe_config->pipe_bpp = 36;
  7809. break;
  7810. default:
  7811. break;
  7812. }
  7813. if (tmp & PIPECONF_COLOR_RANGE_SELECT)
  7814. pipe_config->limited_color_range = true;
  7815. if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
  7816. struct intel_shared_dpll *pll;
  7817. enum intel_dpll_id pll_id;
  7818. pipe_config->has_pch_encoder = true;
  7819. tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
  7820. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  7821. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  7822. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  7823. if (HAS_PCH_IBX(dev_priv)) {
  7824. pll_id = (enum intel_dpll_id) crtc->pipe;
  7825. } else {
  7826. tmp = I915_READ(PCH_DPLL_SEL);
  7827. if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
  7828. pll_id = DPLL_ID_PCH_PLL_B;
  7829. else
7830. pll_id = DPLL_ID_PCH_PLL_A;
  7831. }
  7832. pipe_config->shared_dpll =
  7833. intel_get_shared_dpll_by_id(dev_priv, pll_id);
  7834. pll = pipe_config->shared_dpll;
  7835. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  7836. &pipe_config->dpll_hw_state));
  7837. tmp = pipe_config->dpll_hw_state.dpll;
  7838. pipe_config->pixel_multiplier =
  7839. ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
  7840. >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
  7841. ironlake_pch_clock_get(crtc, pipe_config);
  7842. } else {
  7843. pipe_config->pixel_multiplier = 1;
  7844. }
  7845. intel_get_pipe_timings(crtc, pipe_config);
  7846. intel_get_pipe_src_size(crtc, pipe_config);
  7847. ironlake_get_pfit_config(crtc, pipe_config);
  7848. ret = true;
  7849. out:
  7850. intel_display_power_put(dev_priv, power_domain);
  7851. return ret;
  7852. }
  7853. static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  7854. {
  7855. struct drm_device *dev = dev_priv->dev;
  7856. struct intel_crtc *crtc;
  7857. for_each_intel_crtc(dev, crtc)
  7858. I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
  7859. pipe_name(crtc->pipe));
  7860. I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
  7861. I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
  7862. I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
  7863. I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
  7864. I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
  7865. I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
  7866. "CPU PWM1 enabled\n");
  7867. if (IS_HASWELL(dev))
  7868. I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
  7869. "CPU PWM2 enabled\n");
  7870. I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
  7871. "PCH PWM1 enabled\n");
  7872. I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
  7873. "Utility pin enabled\n");
  7874. I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
  7875. /*
  7876. * In theory we can still leave IRQs enabled, as long as only the HPD
  7877. * interrupts remain enabled. We used to check for that, but since it's
  7878. * gen-specific and since we only disable LCPLL after we fully disable
  7879. * the interrupts, the check below should be enough.
  7880. */
  7881. I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
  7882. }
  7883. static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
  7884. {
  7885. struct drm_device *dev = dev_priv->dev;
  7886. if (IS_HASWELL(dev))
  7887. return I915_READ(D_COMP_HSW);
  7888. else
  7889. return I915_READ(D_COMP_BDW);
  7890. }
  7891. static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
  7892. {
  7893. struct drm_device *dev = dev_priv->dev;
  7894. if (IS_HASWELL(dev)) {
  7895. mutex_lock(&dev_priv->rps.hw_lock);
  7896. if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
  7897. val))
  7898. DRM_ERROR("Failed to write to D_COMP\n");
  7899. mutex_unlock(&dev_priv->rps.hw_lock);
  7900. } else {
  7901. I915_WRITE(D_COMP_BDW, val);
  7902. POSTING_READ(D_COMP_BDW);
  7903. }
  7904. }
  7905. /*
  7906. * This function implements pieces of two sequences from BSpec:
  7907. * - Sequence for display software to disable LCPLL
  7908. * - Sequence for display software to allow package C8+
  7909. * The steps implemented here are just the steps that actually touch the LCPLL
  7910. * register. Callers should take care of disabling all the display engine
  7911. * functions, doing the mode unset, fixing interrupts, etc.
  7912. */
  7913. static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  7914. bool switch_to_fclk, bool allow_power_down)
  7915. {
  7916. uint32_t val;
  7917. assert_can_disable_lcpll(dev_priv);
  7918. val = I915_READ(LCPLL_CTL);
  7919. if (switch_to_fclk) {
  7920. val |= LCPLL_CD_SOURCE_FCLK;
  7921. I915_WRITE(LCPLL_CTL, val);
  7922. if (wait_for_us(I915_READ(LCPLL_CTL) &
  7923. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  7924. DRM_ERROR("Switching to FCLK failed\n");
  7925. val = I915_READ(LCPLL_CTL);
  7926. }
  7927. val |= LCPLL_PLL_DISABLE;
  7928. I915_WRITE(LCPLL_CTL, val);
  7929. POSTING_READ(LCPLL_CTL);
  7930. if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
  7931. DRM_ERROR("LCPLL still locked\n");
  7932. val = hsw_read_dcomp(dev_priv);
  7933. val |= D_COMP_COMP_DISABLE;
  7934. hsw_write_dcomp(dev_priv, val);
  7935. ndelay(100);
  7936. if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
  7937. 1))
  7938. DRM_ERROR("D_COMP RCOMP still in progress\n");
  7939. if (allow_power_down) {
  7940. val = I915_READ(LCPLL_CTL);
  7941. val |= LCPLL_POWER_DOWN_ALLOW;
  7942. I915_WRITE(LCPLL_CTL, val);
  7943. POSTING_READ(LCPLL_CTL);
  7944. }
  7945. }
  7946. /*
  7947. * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  7948. * source.
  7949. */
  7950. static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  7951. {
  7952. uint32_t val;
  7953. val = I915_READ(LCPLL_CTL);
  7954. if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
  7955. LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
  7956. return;
  7957. /*
  7958. * Make sure we're not on PC8 state before disabling PC8, otherwise
  7959. * we'll hang the machine. To prevent PC8 state, just enable force_wake.
  7960. */
  7961. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  7962. if (val & LCPLL_POWER_DOWN_ALLOW) {
  7963. val &= ~LCPLL_POWER_DOWN_ALLOW;
  7964. I915_WRITE(LCPLL_CTL, val);
  7965. POSTING_READ(LCPLL_CTL);
  7966. }
  7967. val = hsw_read_dcomp(dev_priv);
  7968. val |= D_COMP_COMP_FORCE;
  7969. val &= ~D_COMP_COMP_DISABLE;
  7970. hsw_write_dcomp(dev_priv, val);
  7971. val = I915_READ(LCPLL_CTL);
  7972. val &= ~LCPLL_PLL_DISABLE;
  7973. I915_WRITE(LCPLL_CTL, val);
  7974. if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
  7975. DRM_ERROR("LCPLL not locked yet\n");
  7976. if (val & LCPLL_CD_SOURCE_FCLK) {
  7977. val = I915_READ(LCPLL_CTL);
  7978. val &= ~LCPLL_CD_SOURCE_FCLK;
  7979. I915_WRITE(LCPLL_CTL, val);
  7980. if (wait_for_us((I915_READ(LCPLL_CTL) &
  7981. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  7982. DRM_ERROR("Switching back to LCPLL failed\n");
  7983. }
  7984. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  7985. intel_update_cdclk(dev_priv->dev);
  7986. }
  7987. /*
  7988. * Package states C8 and deeper are really deep PC states that can only be
  7989. * reached when all the devices on the system allow it, so even if the graphics
  7990. * device allows PC8+, it doesn't mean the system will actually get to these
  7991. * states. Our driver only allows PC8+ when going into runtime PM.
  7992. *
  7993. * The requirements for PC8+ are that all the outputs are disabled, the power
  7994. * well is disabled and most interrupts are disabled, and these are also
  7995. * requirements for runtime PM. When these conditions are met, we manually do
  7996. * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
7997. * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
  7998. * hang the machine.
  7999. *
  8000. * When we really reach PC8 or deeper states (not just when we allow it) we lose
  8001. * the state of some registers, so when we come back from PC8+ we need to
  8002. * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
  8003. * need to take care of the registers kept by RC6. Notice that this happens even
  8004. * if we don't put the device in PCI D3 state (which is what currently happens
  8005. * because of the runtime PM support).
  8006. *
  8007. * For more, read "Display Sequences for Package C8" on the hardware
  8008. * documentation.
  8009. */
  8010. void hsw_enable_pc8(struct drm_i915_private *dev_priv)
  8011. {
  8012. struct drm_device *dev = dev_priv->dev;
  8013. uint32_t val;
  8014. DRM_DEBUG_KMS("Enabling package C8+\n");
  8015. if (HAS_PCH_LPT_LP(dev)) {
  8016. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8017. val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
  8018. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8019. }
  8020. lpt_disable_clkout_dp(dev);
  8021. hsw_disable_lcpll(dev_priv, true, true);
  8022. }
  8023. void hsw_disable_pc8(struct drm_i915_private *dev_priv)
  8024. {
  8025. struct drm_device *dev = dev_priv->dev;
  8026. uint32_t val;
  8027. DRM_DEBUG_KMS("Disabling package C8+\n");
  8028. hsw_restore_lcpll(dev_priv);
  8029. lpt_init_pch_refclk(dev);
  8030. if (HAS_PCH_LPT_LP(dev)) {
  8031. val = I915_READ(SOUTH_DSPCLK_GATE_D);
  8032. val |= PCH_LP_PARTITION_LEVEL_DISABLE;
  8033. I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
  8034. }
  8035. }
  8036. static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8037. {
  8038. struct drm_device *dev = old_state->dev;
  8039. struct intel_atomic_state *old_intel_state =
  8040. to_intel_atomic_state(old_state);
  8041. unsigned int req_cdclk = old_intel_state->dev_cdclk;
  8042. broxton_set_cdclk(to_i915(dev), req_cdclk);
  8043. }
  8044. /* compute the max rate for new configuration */
  8045. static int ilk_max_pixel_rate(struct drm_atomic_state *state)
  8046. {
  8047. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8048. struct drm_i915_private *dev_priv = state->dev->dev_private;
  8049. struct drm_crtc *crtc;
  8050. struct drm_crtc_state *cstate;
  8051. struct intel_crtc_state *crtc_state;
  8052. unsigned max_pixel_rate = 0, i;
  8053. enum pipe pipe;
  8054. memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
  8055. sizeof(intel_state->min_pixclk));
  8056. for_each_crtc_in_state(state, crtc, cstate, i) {
  8057. int pixel_rate;
  8058. crtc_state = to_intel_crtc_state(cstate);
  8059. if (!crtc_state->base.enable) {
  8060. intel_state->min_pixclk[i] = 0;
  8061. continue;
  8062. }
  8063. pixel_rate = ilk_pipe_pixel_rate(crtc_state);
  8064. /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
  8065. if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
  8066. pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
  8067. intel_state->min_pixclk[i] = pixel_rate;
  8068. }
  8069. for_each_pipe(dev_priv, pipe)
  8070. max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
  8071. return max_pixel_rate;
  8072. }
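/*
 * Example of the IPS adjustment above, using an arbitrary pixel rate:
 * 400000 kHz becomes DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz, so a BDW
 * pipe with IPS enabled demands a cdclk of at least that value rather than
 * the raw pixel rate.
 */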
  8073. static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
  8074. {
  8075. struct drm_i915_private *dev_priv = dev->dev_private;
  8076. uint32_t val, data;
  8077. int ret;
  8078. if (WARN((I915_READ(LCPLL_CTL) &
  8079. (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
  8080. LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
  8081. LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
  8082. LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
  8083. "trying to change cdclk frequency with cdclk not enabled\n"))
  8084. return;
  8085. mutex_lock(&dev_priv->rps.hw_lock);
  8086. ret = sandybridge_pcode_write(dev_priv,
  8087. BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
  8088. mutex_unlock(&dev_priv->rps.hw_lock);
  8089. if (ret) {
  8090. DRM_ERROR("failed to inform pcode about cdclk change\n");
  8091. return;
  8092. }
  8093. val = I915_READ(LCPLL_CTL);
  8094. val |= LCPLL_CD_SOURCE_FCLK;
  8095. I915_WRITE(LCPLL_CTL, val);
  8096. if (wait_for_us(I915_READ(LCPLL_CTL) &
  8097. LCPLL_CD_SOURCE_FCLK_DONE, 1))
  8098. DRM_ERROR("Switching to FCLK failed\n");
  8099. val = I915_READ(LCPLL_CTL);
  8100. val &= ~LCPLL_CLK_FREQ_MASK;
  8101. switch (cdclk) {
  8102. case 450000:
  8103. val |= LCPLL_CLK_FREQ_450;
  8104. data = 0;
  8105. break;
  8106. case 540000:
  8107. val |= LCPLL_CLK_FREQ_54O_BDW;
  8108. data = 1;
  8109. break;
  8110. case 337500:
  8111. val |= LCPLL_CLK_FREQ_337_5_BDW;
  8112. data = 2;
  8113. break;
  8114. case 675000:
  8115. val |= LCPLL_CLK_FREQ_675_BDW;
  8116. data = 3;
  8117. break;
  8118. default:
  8119. WARN(1, "invalid cdclk frequency\n");
  8120. return;
  8121. }
  8122. I915_WRITE(LCPLL_CTL, val);
  8123. val = I915_READ(LCPLL_CTL);
  8124. val &= ~LCPLL_CD_SOURCE_FCLK;
  8125. I915_WRITE(LCPLL_CTL, val);
  8126. if (wait_for_us((I915_READ(LCPLL_CTL) &
  8127. LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
  8128. DRM_ERROR("Switching back to LCPLL failed\n");
  8129. mutex_lock(&dev_priv->rps.hw_lock);
  8130. sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
  8131. mutex_unlock(&dev_priv->rps.hw_lock);
  8132. I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
  8133. intel_update_cdclk(dev);
  8134. WARN(cdclk != dev_priv->cdclk_freq,
  8135. "cdclk requested %d kHz but got %d kHz\n",
  8136. cdclk, dev_priv->cdclk_freq);
  8137. }
  8138. static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
  8139. {
  8140. struct drm_i915_private *dev_priv = to_i915(state->dev);
  8141. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  8142. int max_pixclk = ilk_max_pixel_rate(state);
  8143. int cdclk;
  8144. /*
  8145. * FIXME should also account for plane ratio
  8146. * once 64bpp pixel formats are supported.
  8147. */
  8148. if (max_pixclk > 540000)
  8149. cdclk = 675000;
  8150. else if (max_pixclk > 450000)
  8151. cdclk = 540000;
  8152. else if (max_pixclk > 337500)
  8153. cdclk = 450000;
  8154. else
  8155. cdclk = 337500;
  8156. if (cdclk > dev_priv->max_cdclk_freq) {
  8157. DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  8158. cdclk, dev_priv->max_cdclk_freq);
  8159. return -EINVAL;
  8160. }
  8161. intel_state->cdclk = intel_state->dev_cdclk = cdclk;
  8162. if (!intel_state->active_crtcs)
  8163. intel_state->dev_cdclk = 337500;
  8164. return 0;
  8165. }
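/*
 * Continuing the illustrative numbers from ilk_max_pixel_rate(): a
 * 421053 kHz max pixel clock falls into the "> 337500" bucket, so the code
 * above selects a 450000 kHz cdclk; with no active CRTCs the device cdclk is
 * dropped to the 337500 kHz minimum instead.
 */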
  8166. static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
  8167. {
  8168. struct drm_device *dev = old_state->dev;
  8169. struct intel_atomic_state *old_intel_state =
  8170. to_intel_atomic_state(old_state);
  8171. unsigned req_cdclk = old_intel_state->dev_cdclk;
  8172. broadwell_set_cdclk(dev, req_cdclk);
  8173. }
  8174. static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
  8175. struct intel_crtc_state *crtc_state)
  8176. {
  8177. struct intel_encoder *intel_encoder =
  8178. intel_ddi_get_crtc_new_encoder(crtc_state);
  8179. if (intel_encoder->type != INTEL_OUTPUT_DSI) {
  8180. if (!intel_ddi_pll_select(crtc, crtc_state))
  8181. return -EINVAL;
  8182. }
  8183. crtc->lowfreq_avail = false;
  8184. return 0;
  8185. }
  8186. static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
  8187. enum port port,
  8188. struct intel_crtc_state *pipe_config)
  8189. {
  8190. enum intel_dpll_id id;
  8191. switch (port) {
  8192. case PORT_A:
  8193. pipe_config->ddi_pll_sel = SKL_DPLL0;
  8194. id = DPLL_ID_SKL_DPLL0;
  8195. break;
  8196. case PORT_B:
  8197. pipe_config->ddi_pll_sel = SKL_DPLL1;
  8198. id = DPLL_ID_SKL_DPLL1;
  8199. break;
  8200. case PORT_C:
  8201. pipe_config->ddi_pll_sel = SKL_DPLL2;
  8202. id = DPLL_ID_SKL_DPLL2;
  8203. break;
  8204. default:
  8205. DRM_ERROR("Incorrect port type\n");
  8206. return;
  8207. }
  8208. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8209. }
  8210. static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
  8211. enum port port,
  8212. struct intel_crtc_state *pipe_config)
  8213. {
  8214. enum intel_dpll_id id;
  8215. u32 temp;
  8216. temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
  8217. pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
  8218. switch (pipe_config->ddi_pll_sel) {
  8219. case SKL_DPLL0:
  8220. id = DPLL_ID_SKL_DPLL0;
  8221. break;
  8222. case SKL_DPLL1:
  8223. id = DPLL_ID_SKL_DPLL1;
  8224. break;
  8225. case SKL_DPLL2:
  8226. id = DPLL_ID_SKL_DPLL2;
  8227. break;
  8228. case SKL_DPLL3:
  8229. id = DPLL_ID_SKL_DPLL3;
  8230. break;
  8231. default:
  8232. MISSING_CASE(pipe_config->ddi_pll_sel);
  8233. return;
  8234. }
  8235. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8236. }
  8237. static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
  8238. enum port port,
  8239. struct intel_crtc_state *pipe_config)
  8240. {
  8241. enum intel_dpll_id id;
  8242. pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
  8243. switch (pipe_config->ddi_pll_sel) {
  8244. case PORT_CLK_SEL_WRPLL1:
  8245. id = DPLL_ID_WRPLL1;
  8246. break;
  8247. case PORT_CLK_SEL_WRPLL2:
  8248. id = DPLL_ID_WRPLL2;
  8249. break;
  8250. case PORT_CLK_SEL_SPLL:
  8251. id = DPLL_ID_SPLL;
  8252. break;
  8253. case PORT_CLK_SEL_LCPLL_810:
  8254. id = DPLL_ID_LCPLL_810;
  8255. break;
  8256. case PORT_CLK_SEL_LCPLL_1350:
  8257. id = DPLL_ID_LCPLL_1350;
  8258. break;
  8259. case PORT_CLK_SEL_LCPLL_2700:
  8260. id = DPLL_ID_LCPLL_2700;
  8261. break;
  8262. default:
  8263. MISSING_CASE(pipe_config->ddi_pll_sel);
  8264. /* fall through */
  8265. case PORT_CLK_SEL_NONE:
  8266. return;
  8267. }
  8268. pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
  8269. }
  8270. static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
  8271. struct intel_crtc_state *pipe_config,
  8272. unsigned long *power_domain_mask)
  8273. {
  8274. struct drm_device *dev = crtc->base.dev;
  8275. struct drm_i915_private *dev_priv = dev->dev_private;
  8276. enum intel_display_power_domain power_domain;
  8277. u32 tmp;
  8278. pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  8279. /*
  8280. * XXX: Do intel_display_power_get_if_enabled before reading this (for
8281. * consistency and less surprising code; it's on the always-on power well).
  8282. */
  8283. tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
  8284. if (tmp & TRANS_DDI_FUNC_ENABLE) {
  8285. enum pipe trans_edp_pipe;
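/* The EDP transcoder can be fed by any pipe; work out which one. */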
  8286. switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
  8287. default:
  8288. WARN(1, "unknown pipe linked to edp transcoder\n");
  8289. case TRANS_DDI_EDP_INPUT_A_ONOFF:
  8290. case TRANS_DDI_EDP_INPUT_A_ON:
  8291. trans_edp_pipe = PIPE_A;
  8292. break;
  8293. case TRANS_DDI_EDP_INPUT_B_ONOFF:
  8294. trans_edp_pipe = PIPE_B;
  8295. break;
  8296. case TRANS_DDI_EDP_INPUT_C_ONOFF:
  8297. trans_edp_pipe = PIPE_C;
  8298. break;
  8299. }
  8300. if (trans_edp_pipe == crtc->pipe)
  8301. pipe_config->cpu_transcoder = TRANSCODER_EDP;
  8302. }
  8303. power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
  8304. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8305. return false;
  8306. *power_domain_mask |= BIT(power_domain);
  8307. tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
  8308. return tmp & PIPECONF_ENABLE;
  8309. }
  8310. static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
  8311. struct intel_crtc_state *pipe_config,
  8312. unsigned long *power_domain_mask)
  8313. {
  8314. struct drm_device *dev = crtc->base.dev;
  8315. struct drm_i915_private *dev_priv = dev->dev_private;
  8316. enum intel_display_power_domain power_domain;
  8317. enum port port;
  8318. enum transcoder cpu_transcoder;
  8319. u32 tmp;
  8320. pipe_config->has_dsi_encoder = false;
  8321. for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
  8322. if (port == PORT_A)
  8323. cpu_transcoder = TRANSCODER_DSI_A;
  8324. else
  8325. cpu_transcoder = TRANSCODER_DSI_C;
  8326. power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
  8327. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8328. continue;
  8329. *power_domain_mask |= BIT(power_domain);
  8330. /*
  8331. * The PLL needs to be enabled with a valid divider
  8332. * configuration, otherwise accessing DSI registers will hang
  8333. * the machine. See BSpec North Display Engine
  8334. * registers/MIPI[BXT]. We can break out here early, since we
  8335. * need the same DSI PLL to be enabled for both DSI ports.
  8336. */
  8337. if (!intel_dsi_pll_is_enabled(dev_priv))
  8338. break;
  8339. /* XXX: this works for video mode only */
  8340. tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
  8341. if (!(tmp & DPI_ENABLE))
  8342. continue;
  8343. tmp = I915_READ(MIPI_CTRL(port));
  8344. if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
  8345. continue;
  8346. pipe_config->cpu_transcoder = cpu_transcoder;
  8347. pipe_config->has_dsi_encoder = true;
  8348. break;
  8349. }
  8350. return pipe_config->has_dsi_encoder;
  8351. }
  8352. static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
  8353. struct intel_crtc_state *pipe_config)
  8354. {
  8355. struct drm_device *dev = crtc->base.dev;
  8356. struct drm_i915_private *dev_priv = dev->dev_private;
  8357. struct intel_shared_dpll *pll;
  8358. enum port port;
  8359. uint32_t tmp;
  8360. tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
  8361. port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
  8362. if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
  8363. skylake_get_ddi_pll(dev_priv, port, pipe_config);
  8364. else if (IS_BROXTON(dev))
  8365. bxt_get_ddi_pll(dev_priv, port, pipe_config);
  8366. else
  8367. haswell_get_ddi_pll(dev_priv, port, pipe_config);
  8368. pll = pipe_config->shared_dpll;
  8369. if (pll) {
  8370. WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
  8371. &pipe_config->dpll_hw_state));
  8372. }
  8373. /*
8374. * Haswell has only FDI/PCH transcoder A, which is connected to
  8375. * DDI E. So just check whether this pipe is wired to DDI E and whether
  8376. * the PCH transcoder is on.
  8377. */
  8378. if (INTEL_INFO(dev)->gen < 9 &&
  8379. (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
  8380. pipe_config->has_pch_encoder = true;
  8381. tmp = I915_READ(FDI_RX_CTL(PIPE_A));
  8382. pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
  8383. FDI_DP_PORT_WIDTH_SHIFT) + 1;
  8384. ironlake_get_fdi_m_n_config(crtc, pipe_config);
  8385. }
  8386. }
  8387. static bool haswell_get_pipe_config(struct intel_crtc *crtc,
  8388. struct intel_crtc_state *pipe_config)
  8389. {
  8390. struct drm_device *dev = crtc->base.dev;
  8391. struct drm_i915_private *dev_priv = dev->dev_private;
  8392. enum intel_display_power_domain power_domain;
  8393. unsigned long power_domain_mask;
  8394. bool active;
  8395. power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
  8396. if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
  8397. return false;
  8398. power_domain_mask = BIT(power_domain);
  8399. pipe_config->shared_dpll = NULL;
  8400. active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
  8401. if (IS_BROXTON(dev_priv)) {
  8402. bxt_get_dsi_transcoder_state(crtc, pipe_config,
  8403. &power_domain_mask);
  8404. WARN_ON(active && pipe_config->has_dsi_encoder);
  8405. if (pipe_config->has_dsi_encoder)
  8406. active = true;
  8407. }
  8408. if (!active)
  8409. goto out;
  8410. if (!pipe_config->has_dsi_encoder) {
  8411. haswell_get_ddi_port_state(crtc, pipe_config);
  8412. intel_get_pipe_timings(crtc, pipe_config);
  8413. }
  8414. intel_get_pipe_src_size(crtc, pipe_config);
  8415. pipe_config->gamma_mode =
  8416. I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
  8417. if (INTEL_INFO(dev)->gen >= 9) {
  8418. skl_init_scalers(dev, crtc, pipe_config);
  8419. }
  8420. if (INTEL_INFO(dev)->gen >= 9) {
  8421. pipe_config->scaler_state.scaler_id = -1;
  8422. pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8423. }
  8424. power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
  8425. if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
  8426. power_domain_mask |= BIT(power_domain);
  8427. if (INTEL_INFO(dev)->gen >= 9)
  8428. skylake_get_pfit_config(crtc, pipe_config);
  8429. else
  8430. ironlake_get_pfit_config(crtc, pipe_config);
  8431. }
  8432. if (IS_HASWELL(dev))
  8433. pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
  8434. (I915_READ(IPS_CTL) & IPS_ENABLE);
  8435. if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
  8436. !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
  8437. pipe_config->pixel_multiplier =
  8438. I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
  8439. } else {
  8440. pipe_config->pixel_multiplier = 1;
  8441. }
  8442. out:
  8443. for_each_power_domain(power_domain, power_domain_mask)
  8444. intel_display_power_put(dev_priv, power_domain);
  8445. return active;
  8446. }
  8447. static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
  8448. const struct intel_plane_state *plane_state)
  8449. {
  8450. struct drm_device *dev = crtc->dev;
  8451. struct drm_i915_private *dev_priv = dev->dev_private;
  8452. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  8453. uint32_t cntl = 0, size = 0;
  8454. if (plane_state && plane_state->visible) {
  8455. unsigned int width = plane_state->base.crtc_w;
  8456. unsigned int height = plane_state->base.crtc_h;
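/*
 * 845/865 cursors are 32bpp; the stride is the width rounded up to a
 * power of two, times 4 bytes per pixel. Only 256-2048 byte strides
 * are accepted by the switch below.
 */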
  8457. unsigned int stride = roundup_pow_of_two(width) * 4;
  8458. switch (stride) {
  8459. default:
  8460. WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
  8461. width, stride);
  8462. stride = 256;
  8463. /* fallthrough */
  8464. case 256:
  8465. case 512:
  8466. case 1024:
  8467. case 2048:
  8468. break;
  8469. }
  8470. cntl |= CURSOR_ENABLE |
  8471. CURSOR_GAMMA_ENABLE |
  8472. CURSOR_FORMAT_ARGB |
  8473. CURSOR_STRIDE(stride);
  8474. size = (height << 12) | width;
  8475. }
  8476. if (intel_crtc->cursor_cntl != 0 &&
  8477. (intel_crtc->cursor_base != base ||
  8478. intel_crtc->cursor_size != size ||
  8479. intel_crtc->cursor_cntl != cntl)) {
  8480. /* On these chipsets we can only modify the base/size/stride
  8481. * whilst the cursor is disabled.
  8482. */
  8483. I915_WRITE(CURCNTR(PIPE_A), 0);
  8484. POSTING_READ(CURCNTR(PIPE_A));
  8485. intel_crtc->cursor_cntl = 0;
  8486. }
  8487. if (intel_crtc->cursor_base != base) {
  8488. I915_WRITE(CURBASE(PIPE_A), base);
  8489. intel_crtc->cursor_base = base;
  8490. }
  8491. if (intel_crtc->cursor_size != size) {
  8492. I915_WRITE(CURSIZE, size);
  8493. intel_crtc->cursor_size = size;
  8494. }
  8495. if (intel_crtc->cursor_cntl != cntl) {
  8496. I915_WRITE(CURCNTR(PIPE_A), cntl);
  8497. POSTING_READ(CURCNTR(PIPE_A));
  8498. intel_crtc->cursor_cntl = cntl;
  8499. }
  8500. }
  8501. static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
  8502. const struct intel_plane_state *plane_state)
  8503. {
  8504. struct drm_device *dev = crtc->dev;
  8505. struct drm_i915_private *dev_priv = dev->dev_private;
  8506. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  8507. int pipe = intel_crtc->pipe;
  8508. uint32_t cntl = 0;
  8509. if (plane_state && plane_state->visible) {
  8510. cntl = MCURSOR_GAMMA_ENABLE;
  8511. switch (plane_state->base.crtc_w) {
  8512. case 64:
  8513. cntl |= CURSOR_MODE_64_ARGB_AX;
  8514. break;
  8515. case 128:
  8516. cntl |= CURSOR_MODE_128_ARGB_AX;
  8517. break;
  8518. case 256:
  8519. cntl |= CURSOR_MODE_256_ARGB_AX;
  8520. break;
  8521. default:
  8522. MISSING_CASE(plane_state->base.crtc_w);
  8523. return;
  8524. }
  8525. cntl |= pipe << 28; /* Connect to correct pipe */
  8526. if (HAS_DDI(dev))
  8527. cntl |= CURSOR_PIPE_CSC_ENABLE;
  8528. if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
  8529. cntl |= CURSOR_ROTATE_180;
  8530. }
  8531. if (intel_crtc->cursor_cntl != cntl) {
  8532. I915_WRITE(CURCNTR(pipe), cntl);
  8533. POSTING_READ(CURCNTR(pipe));
  8534. intel_crtc->cursor_cntl = cntl;
  8535. }
  8536. /* and commit changes on next vblank */
  8537. I915_WRITE(CURBASE(pipe), base);
  8538. POSTING_READ(CURBASE(pipe));
  8539. intel_crtc->cursor_base = base;
  8540. }
8541. /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
  8542. static void intel_crtc_update_cursor(struct drm_crtc *crtc,
  8543. const struct intel_plane_state *plane_state)
  8544. {
  8545. struct drm_device *dev = crtc->dev;
  8546. struct drm_i915_private *dev_priv = dev->dev_private;
  8547. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  8548. int pipe = intel_crtc->pipe;
  8549. u32 base = intel_crtc->cursor_addr;
  8550. u32 pos = 0;
  8551. if (plane_state) {
  8552. int x = plane_state->base.crtc_x;
  8553. int y = plane_state->base.crtc_y;
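/*
 * CURPOS takes sign-magnitude coordinates: a cursor hanging off the
 * top/left edge sets the per-axis sign bit and stores the absolute value.
 */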
  8554. if (x < 0) {
  8555. pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
  8556. x = -x;
  8557. }
  8558. pos |= x << CURSOR_X_SHIFT;
  8559. if (y < 0) {
  8560. pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
  8561. y = -y;
  8562. }
  8563. pos |= y << CURSOR_Y_SHIFT;
  8564. /* ILK+ do this automagically */
  8565. if (HAS_GMCH_DISPLAY(dev) &&
  8566. plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
  8567. base += (plane_state->base.crtc_h *
  8568. plane_state->base.crtc_w - 1) * 4;
  8569. }
  8570. }
  8571. I915_WRITE(CURPOS(pipe), pos);
  8572. if (IS_845G(dev) || IS_I865G(dev))
  8573. i845_update_cursor(crtc, base, plane_state);
  8574. else
  8575. i9xx_update_cursor(crtc, base, plane_state);
  8576. }
  8577. static bool cursor_size_ok(struct drm_device *dev,
  8578. uint32_t width, uint32_t height)
  8579. {
  8580. if (width == 0 || height == 0)
  8581. return false;
  8582. /*
  8583. * 845g/865g are special in that they are only limited by
8584. * the width of their cursors; the height is arbitrary up to
  8585. * the precision of the register. Everything else requires
  8586. * square cursors, limited to a few power-of-two sizes.
  8587. */
  8588. if (IS_845G(dev) || IS_I865G(dev)) {
  8589. if ((width & 63) != 0)
  8590. return false;
  8591. if (width > (IS_845G(dev) ? 64 : 512))
  8592. return false;
  8593. if (height > 1023)
  8594. return false;
  8595. } else {
  8596. switch (width | height) {
  8597. case 256:
  8598. case 128:
  8599. if (IS_GEN2(dev))
  8600. return false;
  8601. case 64:
  8602. break;
  8603. default:
  8604. return false;
  8605. }
  8606. }
  8607. return true;
  8608. }
  8609. /* VESA 640x480x72Hz mode to set on the pipe */
  8610. static struct drm_display_mode load_detect_mode = {
  8611. DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
  8612. 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
  8613. };
  8614. struct drm_framebuffer *
  8615. __intel_framebuffer_create(struct drm_device *dev,
  8616. struct drm_mode_fb_cmd2 *mode_cmd,
  8617. struct drm_i915_gem_object *obj)
  8618. {
  8619. struct intel_framebuffer *intel_fb;
  8620. int ret;
  8621. intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
  8622. if (!intel_fb)
  8623. return ERR_PTR(-ENOMEM);
  8624. ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
  8625. if (ret)
  8626. goto err;
  8627. return &intel_fb->base;
  8628. err:
  8629. kfree(intel_fb);
  8630. return ERR_PTR(ret);
  8631. }
  8632. static struct drm_framebuffer *
  8633. intel_framebuffer_create(struct drm_device *dev,
  8634. struct drm_mode_fb_cmd2 *mode_cmd,
  8635. struct drm_i915_gem_object *obj)
  8636. {
  8637. struct drm_framebuffer *fb;
  8638. int ret;
  8639. ret = i915_mutex_lock_interruptible(dev);
  8640. if (ret)
  8641. return ERR_PTR(ret);
  8642. fb = __intel_framebuffer_create(dev, mode_cmd, obj);
  8643. mutex_unlock(&dev->struct_mutex);
  8644. return fb;
  8645. }
  8646. static u32
  8647. intel_framebuffer_pitch_for_width(int width, int bpp)
  8648. {
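/* Bytes per scanline for the given bpp, padded to a 64 byte multiple. */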
  8649. u32 pitch = DIV_ROUND_UP(width * bpp, 8);
  8650. return ALIGN(pitch, 64);
  8651. }
  8652. static u32
  8653. intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
  8654. {
  8655. u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
  8656. return PAGE_ALIGN(pitch * mode->vdisplay);
  8657. }
  8658. static struct drm_framebuffer *
  8659. intel_framebuffer_create_for_mode(struct drm_device *dev,
  8660. struct drm_display_mode *mode,
  8661. int depth, int bpp)
  8662. {
  8663. struct drm_framebuffer *fb;
  8664. struct drm_i915_gem_object *obj;
  8665. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  8666. obj = i915_gem_alloc_object(dev,
  8667. intel_framebuffer_size_for_mode(mode, bpp));
  8668. if (obj == NULL)
  8669. return ERR_PTR(-ENOMEM);
  8670. mode_cmd.width = mode->hdisplay;
  8671. mode_cmd.height = mode->vdisplay;
  8672. mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
  8673. bpp);
  8674. mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
  8675. fb = intel_framebuffer_create(dev, &mode_cmd, obj);
  8676. if (IS_ERR(fb))
  8677. drm_gem_object_unreference_unlocked(&obj->base);
  8678. return fb;
  8679. }
  8680. static struct drm_framebuffer *
  8681. mode_fits_in_fbdev(struct drm_device *dev,
  8682. struct drm_display_mode *mode)
  8683. {
  8684. #ifdef CONFIG_DRM_FBDEV_EMULATION
  8685. struct drm_i915_private *dev_priv = dev->dev_private;
  8686. struct drm_i915_gem_object *obj;
  8687. struct drm_framebuffer *fb;
  8688. if (!dev_priv->fbdev)
  8689. return NULL;
  8690. if (!dev_priv->fbdev->fb)
  8691. return NULL;
  8692. obj = dev_priv->fbdev->fb->obj;
  8693. BUG_ON(!obj);
  8694. fb = &dev_priv->fbdev->fb->base;
  8695. if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
  8696. fb->bits_per_pixel))
  8697. return NULL;
  8698. if (obj->base.size < mode->vdisplay * fb->pitches[0])
  8699. return NULL;
  8700. drm_framebuffer_reference(fb);
  8701. return fb;
  8702. #else
  8703. return NULL;
  8704. #endif
  8705. }
  8706. static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
  8707. struct drm_crtc *crtc,
  8708. struct drm_display_mode *mode,
  8709. struct drm_framebuffer *fb,
  8710. int x, int y)
  8711. {
  8712. struct drm_plane_state *plane_state;
  8713. int hdisplay, vdisplay;
  8714. int ret;
  8715. plane_state = drm_atomic_get_plane_state(state, crtc->primary);
  8716. if (IS_ERR(plane_state))
  8717. return PTR_ERR(plane_state);
  8718. if (mode)
  8719. drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
  8720. else
  8721. hdisplay = vdisplay = 0;
  8722. ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
  8723. if (ret)
  8724. return ret;
  8725. drm_atomic_set_fb_for_plane(plane_state, fb);
  8726. plane_state->crtc_x = 0;
  8727. plane_state->crtc_y = 0;
  8728. plane_state->crtc_w = hdisplay;
  8729. plane_state->crtc_h = vdisplay;
  8730. plane_state->src_x = x << 16;
  8731. plane_state->src_y = y << 16;
  8732. plane_state->src_w = hdisplay << 16;
  8733. plane_state->src_h = vdisplay << 16;
  8734. return 0;
  8735. }
  8736. bool intel_get_load_detect_pipe(struct drm_connector *connector,
  8737. struct drm_display_mode *mode,
  8738. struct intel_load_detect_pipe *old,
  8739. struct drm_modeset_acquire_ctx *ctx)
  8740. {
  8741. struct intel_crtc *intel_crtc;
  8742. struct intel_encoder *intel_encoder =
  8743. intel_attached_encoder(connector);
  8744. struct drm_crtc *possible_crtc;
  8745. struct drm_encoder *encoder = &intel_encoder->base;
  8746. struct drm_crtc *crtc = NULL;
  8747. struct drm_device *dev = encoder->dev;
  8748. struct drm_framebuffer *fb;
  8749. struct drm_mode_config *config = &dev->mode_config;
  8750. struct drm_atomic_state *state = NULL, *restore_state = NULL;
  8751. struct drm_connector_state *connector_state;
  8752. struct intel_crtc_state *crtc_state;
  8753. int ret, i = -1;
  8754. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  8755. connector->base.id, connector->name,
  8756. encoder->base.id, encoder->name);
  8757. old->restore_state = NULL;
  8758. retry:
  8759. ret = drm_modeset_lock(&config->connection_mutex, ctx);
  8760. if (ret)
  8761. goto fail;
  8762. /*
  8763. * Algorithm gets a little messy:
  8764. *
  8765. * - if the connector already has an assigned crtc, use it (but make
  8766. * sure it's on first)
  8767. *
  8768. * - try to find the first unused crtc that can drive this connector,
  8769. * and use that if we find one
  8770. */
  8771. /* See if we already have a CRTC for this connector */
  8772. if (connector->state->crtc) {
  8773. crtc = connector->state->crtc;
  8774. ret = drm_modeset_lock(&crtc->mutex, ctx);
  8775. if (ret)
  8776. goto fail;
  8777. /* Make sure the crtc and connector are running */
  8778. goto found;
  8779. }
  8780. /* Find an unused one (if possible) */
  8781. for_each_crtc(dev, possible_crtc) {
  8782. i++;
  8783. if (!(encoder->possible_crtcs & (1 << i)))
  8784. continue;
  8785. ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
  8786. if (ret)
  8787. goto fail;
  8788. if (possible_crtc->state->enable) {
  8789. drm_modeset_unlock(&possible_crtc->mutex);
  8790. continue;
  8791. }
  8792. crtc = possible_crtc;
  8793. break;
  8794. }
  8795. /*
  8796. * If we didn't find an unused CRTC, don't use any.
  8797. */
  8798. if (!crtc) {
  8799. DRM_DEBUG_KMS("no pipe available for load-detect\n");
  8800. goto fail;
  8801. }
  8802. found:
  8803. intel_crtc = to_intel_crtc(crtc);
  8804. ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
  8805. if (ret)
  8806. goto fail;
  8807. state = drm_atomic_state_alloc(dev);
  8808. restore_state = drm_atomic_state_alloc(dev);
  8809. if (!state || !restore_state) {
  8810. ret = -ENOMEM;
  8811. goto fail;
  8812. }
  8813. state->acquire_ctx = ctx;
  8814. restore_state->acquire_ctx = ctx;
  8815. connector_state = drm_atomic_get_connector_state(state, connector);
  8816. if (IS_ERR(connector_state)) {
  8817. ret = PTR_ERR(connector_state);
  8818. goto fail;
  8819. }
  8820. ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
  8821. if (ret)
  8822. goto fail;
  8823. crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
  8824. if (IS_ERR(crtc_state)) {
  8825. ret = PTR_ERR(crtc_state);
  8826. goto fail;
  8827. }
  8828. crtc_state->base.active = crtc_state->base.enable = true;
  8829. if (!mode)
  8830. mode = &load_detect_mode;
  8831. /* We need a framebuffer large enough to accommodate all accesses
  8832. * that the plane may generate whilst we perform load detection.
8833. * We cannot rely on the fbcon being present (we get called
8834. * during its initialisation to detect all boot displays, or it may
8835. * not even exist), nor on it being large enough to satisfy the
  8836. * requested mode.
  8837. */
  8838. fb = mode_fits_in_fbdev(dev, mode);
  8839. if (fb == NULL) {
  8840. DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
  8841. fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
  8842. } else
  8843. DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
  8844. if (IS_ERR(fb)) {
  8845. DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
  8846. goto fail;
  8847. }
  8848. ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
  8849. if (ret)
  8850. goto fail;
  8851. drm_framebuffer_unreference(fb);
  8852. ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
  8853. if (ret)
  8854. goto fail;
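/*
 * Duplicate the connector, crtc and primary plane states into
 * restore_state so the pre-load-detect configuration can be committed
 * again when the pipe is released.
 */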
  8855. ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
  8856. if (!ret)
  8857. ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
  8858. if (!ret)
  8859. ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
  8860. if (ret) {
  8861. DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
  8862. goto fail;
  8863. }
  8864. ret = drm_atomic_commit(state);
  8865. if (ret) {
  8866. DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
  8867. goto fail;
  8868. }
  8869. old->restore_state = restore_state;
  8870. /* let the connector get through one full cycle before testing */
  8871. intel_wait_for_vblank(dev, intel_crtc->pipe);
  8872. return true;
  8873. fail:
  8874. drm_atomic_state_free(state);
  8875. drm_atomic_state_free(restore_state);
  8876. restore_state = state = NULL;
  8877. if (ret == -EDEADLK) {
  8878. drm_modeset_backoff(ctx);
  8879. goto retry;
  8880. }
  8881. return false;
  8882. }
  8883. void intel_release_load_detect_pipe(struct drm_connector *connector,
  8884. struct intel_load_detect_pipe *old,
  8885. struct drm_modeset_acquire_ctx *ctx)
  8886. {
  8887. struct intel_encoder *intel_encoder =
  8888. intel_attached_encoder(connector);
  8889. struct drm_encoder *encoder = &intel_encoder->base;
  8890. struct drm_atomic_state *state = old->restore_state;
  8891. int ret;
  8892. DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
  8893. connector->base.id, connector->name,
  8894. encoder->base.id, encoder->name);
  8895. if (!state)
  8896. return;
  8897. ret = drm_atomic_commit(state);
  8898. if (ret) {
  8899. DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
  8900. drm_atomic_state_free(state);
  8901. }
  8902. }
  8903. static int i9xx_pll_refclk(struct drm_device *dev,
  8904. const struct intel_crtc_state *pipe_config)
  8905. {
  8906. struct drm_i915_private *dev_priv = dev->dev_private;
  8907. u32 dpll = pipe_config->dpll_hw_state.dpll;
  8908. if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
  8909. return dev_priv->vbt.lvds_ssc_freq;
  8910. else if (HAS_PCH_SPLIT(dev))
  8911. return 120000;
  8912. else if (!IS_GEN2(dev))
  8913. return 96000;
  8914. else
  8915. return 48000;
  8916. }
  8917. /* Returns the clock of the currently programmed mode of the given pipe. */
  8918. static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
  8919. struct intel_crtc_state *pipe_config)
  8920. {
  8921. struct drm_device *dev = crtc->base.dev;
  8922. struct drm_i915_private *dev_priv = dev->dev_private;
  8923. int pipe = pipe_config->cpu_transcoder;
  8924. u32 dpll = pipe_config->dpll_hw_state.dpll;
  8925. u32 fp;
  8926. intel_clock_t clock;
  8927. int port_clock;
  8928. int refclk = i9xx_pll_refclk(dev, pipe_config);
  8929. if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
  8930. fp = pipe_config->dpll_hw_state.fp0;
  8931. else
  8932. fp = pipe_config->dpll_hw_state.fp1;
  8933. clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
  8934. if (IS_PINEVIEW(dev)) {
  8935. clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
  8936. clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
  8937. } else {
  8938. clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
  8939. clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
  8940. }
  8941. if (!IS_GEN2(dev)) {
  8942. if (IS_PINEVIEW(dev))
  8943. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
  8944. DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
  8945. else
  8946. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
  8947. DPLL_FPA01_P1_POST_DIV_SHIFT);
  8948. switch (dpll & DPLL_MODE_MASK) {
  8949. case DPLLB_MODE_DAC_SERIAL:
  8950. clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
  8951. 5 : 10;
  8952. break;
  8953. case DPLLB_MODE_LVDS:
  8954. clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
  8955. 7 : 14;
  8956. break;
  8957. default:
  8958. DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
  8959. "mode\n", (int)(dpll & DPLL_MODE_MASK));
  8960. return;
  8961. }
  8962. if (IS_PINEVIEW(dev))
  8963. port_clock = pnv_calc_dpll_params(refclk, &clock);
  8964. else
  8965. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  8966. } else {
  8967. u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
  8968. bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
  8969. if (is_lvds) {
  8970. clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
  8971. DPLL_FPA01_P1_POST_DIV_SHIFT);
  8972. if (lvds & LVDS_CLKB_POWER_UP)
  8973. clock.p2 = 7;
  8974. else
  8975. clock.p2 = 14;
  8976. } else {
  8977. if (dpll & PLL_P1_DIVIDE_BY_TWO)
  8978. clock.p1 = 2;
  8979. else {
  8980. clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
  8981. DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
  8982. }
  8983. if (dpll & PLL_P2_DIVIDE_BY_4)
  8984. clock.p2 = 4;
  8985. else
  8986. clock.p2 = 2;
  8987. }
  8988. port_clock = i9xx_calc_dpll_params(refclk, &clock);
  8989. }
  8990. /*
  8991. * This value includes pixel_multiplier. We will use
  8992. * port_clock to compute adjusted_mode.crtc_clock in the
  8993. * encoder's get_config() function.
  8994. */
  8995. pipe_config->port_clock = port_clock;
  8996. }
  8997. int intel_dotclock_calculate(int link_freq,
  8998. const struct intel_link_m_n *m_n)
  8999. {
  9000. /*
  9001. * The calculation for the data clock is:
  9002. * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
9003. * But we want to avoid losing precision if possible, so:
  9004. * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
  9005. *
9006. * and the dotclock derived from the link M/N values is simpler:
9007. * dotclock = (link_m * link_clock) / link_n
  9008. */
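/* The product link_m * link_freq can exceed 32 bits, hence the 64-bit multiply. */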
  9009. if (!m_n->link_n)
  9010. return 0;
  9011. return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
  9012. }
  9013. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  9014. struct intel_crtc_state *pipe_config)
  9015. {
  9016. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  9017. /* read out port_clock from the DPLL */
  9018. i9xx_crtc_clock_get(crtc, pipe_config);
  9019. /*
  9020. * In case there is an active pipe without active ports,
9021. * we still need an estimate of the dotclock.
  9022. * Calculate one based on the FDI configuration.
  9023. */
  9024. pipe_config->base.adjusted_mode.crtc_clock =
  9025. intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  9026. &pipe_config->fdi_m_n);
  9027. }
  9028. /** Returns the currently programmed mode of the given pipe. */
  9029. struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
  9030. struct drm_crtc *crtc)
  9031. {
  9032. struct drm_i915_private *dev_priv = dev->dev_private;
  9033. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9034. enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
  9035. struct drm_display_mode *mode;
  9036. struct intel_crtc_state *pipe_config;
  9037. int htot = I915_READ(HTOTAL(cpu_transcoder));
  9038. int hsync = I915_READ(HSYNC(cpu_transcoder));
  9039. int vtot = I915_READ(VTOTAL(cpu_transcoder));
  9040. int vsync = I915_READ(VSYNC(cpu_transcoder));
  9041. enum pipe pipe = intel_crtc->pipe;
  9042. mode = kzalloc(sizeof(*mode), GFP_KERNEL);
  9043. if (!mode)
  9044. return NULL;
  9045. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  9046. if (!pipe_config) {
  9047. kfree(mode);
  9048. return NULL;
  9049. }
  9050. /*
  9051. * Construct a pipe_config sufficient for getting the clock info
  9052. * back out of crtc_clock_get.
  9053. *
  9054. * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
  9055. * to use a real value here instead.
  9056. */
  9057. pipe_config->cpu_transcoder = (enum transcoder) pipe;
  9058. pipe_config->pixel_multiplier = 1;
  9059. pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
  9060. pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
  9061. pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
  9062. i9xx_crtc_clock_get(intel_crtc, pipe_config);
  9063. mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
  9064. mode->hdisplay = (htot & 0xffff) + 1;
  9065. mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
  9066. mode->hsync_start = (hsync & 0xffff) + 1;
  9067. mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
  9068. mode->vdisplay = (vtot & 0xffff) + 1;
  9069. mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
  9070. mode->vsync_start = (vsync & 0xffff) + 1;
  9071. mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
  9072. drm_mode_set_name(mode);
  9073. kfree(pipe_config);
  9074. return mode;
  9075. }
  9076. void intel_mark_busy(struct drm_device *dev)
  9077. {
  9078. struct drm_i915_private *dev_priv = dev->dev_private;
  9079. if (dev_priv->mm.busy)
  9080. return;
  9081. intel_runtime_pm_get(dev_priv);
  9082. i915_update_gfx_val(dev_priv);
  9083. if (INTEL_INFO(dev)->gen >= 6)
  9084. gen6_rps_busy(dev_priv);
  9085. dev_priv->mm.busy = true;
  9086. }
  9087. void intel_mark_idle(struct drm_device *dev)
  9088. {
  9089. struct drm_i915_private *dev_priv = dev->dev_private;
  9090. if (!dev_priv->mm.busy)
  9091. return;
  9092. dev_priv->mm.busy = false;
  9093. if (INTEL_INFO(dev)->gen >= 6)
  9094. gen6_rps_idle(dev->dev_private);
  9095. intel_runtime_pm_put(dev_priv);
  9096. }
  9097. static void intel_crtc_destroy(struct drm_crtc *crtc)
  9098. {
  9099. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9100. struct drm_device *dev = crtc->dev;
  9101. struct intel_unpin_work *work;
  9102. spin_lock_irq(&dev->event_lock);
  9103. work = intel_crtc->unpin_work;
  9104. intel_crtc->unpin_work = NULL;
  9105. spin_unlock_irq(&dev->event_lock);
  9106. if (work) {
  9107. cancel_work_sync(&work->work);
  9108. kfree(work);
  9109. }
  9110. drm_crtc_cleanup(crtc);
  9111. kfree(intel_crtc);
  9112. }
  9113. static void intel_unpin_work_fn(struct work_struct *__work)
  9114. {
  9115. struct intel_unpin_work *work =
  9116. container_of(__work, struct intel_unpin_work, work);
  9117. struct intel_crtc *crtc = to_intel_crtc(work->crtc);
  9118. struct drm_device *dev = crtc->base.dev;
  9119. struct drm_plane *primary = crtc->base.primary;
  9120. mutex_lock(&dev->struct_mutex);
  9121. intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
  9122. drm_gem_object_unreference(&work->pending_flip_obj->base);
  9123. if (work->flip_queued_req)
  9124. i915_gem_request_assign(&work->flip_queued_req, NULL);
  9125. mutex_unlock(&dev->struct_mutex);
  9126. intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
  9127. intel_fbc_post_update(crtc);
  9128. drm_framebuffer_unreference(work->old_fb);
  9129. BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
  9130. atomic_dec(&crtc->unpin_work_count);
  9131. kfree(work);
  9132. }
  9133. static void do_intel_finish_page_flip(struct drm_device *dev,
  9134. struct drm_crtc *crtc)
  9135. {
  9136. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9137. struct intel_unpin_work *work;
  9138. unsigned long flags;
  9139. /* Ignore early vblank irqs */
  9140. if (intel_crtc == NULL)
  9141. return;
  9142. /*
  9143. * This is called both by irq handlers and the reset code (to complete
  9144. * lost pageflips) so needs the full irqsave spinlocks.
  9145. */
  9146. spin_lock_irqsave(&dev->event_lock, flags);
  9147. work = intel_crtc->unpin_work;
  9148. /* Ensure we don't miss a work->pending update ... */
  9149. smp_rmb();
  9150. if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
  9151. spin_unlock_irqrestore(&dev->event_lock, flags);
  9152. return;
  9153. }
  9154. page_flip_completed(intel_crtc);
  9155. spin_unlock_irqrestore(&dev->event_lock, flags);
  9156. }
  9157. void intel_finish_page_flip(struct drm_device *dev, int pipe)
  9158. {
  9159. struct drm_i915_private *dev_priv = dev->dev_private;
  9160. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  9161. do_intel_finish_page_flip(dev, crtc);
  9162. }
  9163. void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
  9164. {
  9165. struct drm_i915_private *dev_priv = dev->dev_private;
  9166. struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
  9167. do_intel_finish_page_flip(dev, crtc);
  9168. }
  9169. /* Is 'a' after or equal to 'b'? */
  9170. static bool g4x_flip_count_after_eq(u32 a, u32 b)
  9171. {
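/*
 * Unsigned subtraction keeps this correct across counter wraparound:
 * the top bit of (a - b) is clear whenever 'a' is at or less than 2^31
 * counts ahead of 'b'.
 */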
  9172. return !((a - b) & 0x80000000);
  9173. }
  9174. static bool page_flip_finished(struct intel_crtc *crtc)
  9175. {
  9176. struct drm_device *dev = crtc->base.dev;
  9177. struct drm_i915_private *dev_priv = dev->dev_private;
  9178. unsigned reset_counter;
  9179. reset_counter = i915_reset_counter(&dev_priv->gpu_error);
  9180. if (crtc->reset_counter != reset_counter)
  9181. return true;
  9182. /*
9183. * The relevant registers don't exist on pre-ctg.
  9184. * As the flip done interrupt doesn't trigger for mmio
  9185. * flips on gmch platforms, a flip count check isn't
  9186. * really needed there. But since ctg has the registers,
  9187. * include it in the check anyway.
  9188. */
  9189. if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
  9190. return true;
  9191. /*
  9192. * BDW signals flip done immediately if the plane
  9193. * is disabled, even if the plane enable is already
  9194. * armed to occur at the next vblank :(
  9195. */
  9196. /*
  9197. * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
  9198. * used the same base address. In that case the mmio flip might
  9199. * have completed, but the CS hasn't even executed the flip yet.
  9200. *
  9201. * A flip count check isn't enough as the CS might have updated
  9202. * the base address just after start of vblank, but before we
  9203. * managed to process the interrupt. This means we'd complete the
  9204. * CS flip too soon.
  9205. *
  9206. * Combining both checks should get us a good enough result. It may
  9207. * still happen that the CS flip has been executed, but has not
  9208. * yet actually completed. But in case the base address is the same
  9209. * anyway, we don't really care.
  9210. */
  9211. return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
  9212. crtc->unpin_work->gtt_offset &&
  9213. g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
  9214. crtc->unpin_work->flip_count);
  9215. }
  9216. void intel_prepare_page_flip(struct drm_device *dev, int plane)
  9217. {
  9218. struct drm_i915_private *dev_priv = dev->dev_private;
  9219. struct intel_crtc *intel_crtc =
  9220. to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
  9221. unsigned long flags;
  9222. /*
  9223. * This is called both by irq handlers and the reset code (to complete
  9224. * lost pageflips) so needs the full irqsave spinlocks.
  9225. *
  9226. * NB: An MMIO update of the plane base pointer will also
  9227. * generate a page-flip completion irq, i.e. every modeset
  9228. * is also accompanied by a spurious intel_prepare_page_flip().
  9229. */
  9230. spin_lock_irqsave(&dev->event_lock, flags);
  9231. if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
  9232. atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
  9233. spin_unlock_irqrestore(&dev->event_lock, flags);
  9234. }
  9235. static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
  9236. {
  9237. /* Ensure that the work item is consistent when activating it ... */
  9238. smp_wmb();
  9239. atomic_set(&work->pending, INTEL_FLIP_PENDING);
  9240. /* and that it is marked active as soon as the irq could fire. */
  9241. smp_wmb();
  9242. }
  9243. static int intel_gen2_queue_flip(struct drm_device *dev,
  9244. struct drm_crtc *crtc,
  9245. struct drm_framebuffer *fb,
  9246. struct drm_i915_gem_object *obj,
  9247. struct drm_i915_gem_request *req,
  9248. uint32_t flags)
  9249. {
  9250. struct intel_engine_cs *engine = req->engine;
  9251. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9252. u32 flip_mask;
  9253. int ret;
  9254. ret = intel_ring_begin(req, 6);
  9255. if (ret)
  9256. return ret;
  9257. /* Can't queue multiple flips, so wait for the previous
  9258. * one to finish before executing the next.
  9259. */
  9260. if (intel_crtc->plane)
  9261. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9262. else
  9263. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9264. intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
  9265. intel_ring_emit(engine, MI_NOOP);
  9266. intel_ring_emit(engine, MI_DISPLAY_FLIP |
  9267. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9268. intel_ring_emit(engine, fb->pitches[0]);
  9269. intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
  9270. intel_ring_emit(engine, 0); /* aux display base address, unused */
  9271. intel_mark_page_flip_active(intel_crtc->unpin_work);
  9272. return 0;
  9273. }
  9274. static int intel_gen3_queue_flip(struct drm_device *dev,
  9275. struct drm_crtc *crtc,
  9276. struct drm_framebuffer *fb,
  9277. struct drm_i915_gem_object *obj,
  9278. struct drm_i915_gem_request *req,
  9279. uint32_t flags)
  9280. {
  9281. struct intel_engine_cs *engine = req->engine;
  9282. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9283. u32 flip_mask;
  9284. int ret;
  9285. ret = intel_ring_begin(req, 6);
  9286. if (ret)
  9287. return ret;
  9288. if (intel_crtc->plane)
  9289. flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
  9290. else
  9291. flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
  9292. intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
  9293. intel_ring_emit(engine, MI_NOOP);
  9294. intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
  9295. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9296. intel_ring_emit(engine, fb->pitches[0]);
  9297. intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
  9298. intel_ring_emit(engine, MI_NOOP);
  9299. intel_mark_page_flip_active(intel_crtc->unpin_work);
  9300. return 0;
  9301. }
  9302. static int intel_gen4_queue_flip(struct drm_device *dev,
  9303. struct drm_crtc *crtc,
  9304. struct drm_framebuffer *fb,
  9305. struct drm_i915_gem_object *obj,
  9306. struct drm_i915_gem_request *req,
  9307. uint32_t flags)
  9308. {
  9309. struct intel_engine_cs *engine = req->engine;
  9310. struct drm_i915_private *dev_priv = dev->dev_private;
  9311. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9312. uint32_t pf, pipesrc;
  9313. int ret;
  9314. ret = intel_ring_begin(req, 4);
  9315. if (ret)
  9316. return ret;
  9317. /* i965+ uses the linear or tiled offsets from the
  9318. * Display Registers (which do not change across a page-flip)
  9319. * so we need only reprogram the base address.
  9320. */
  9321. intel_ring_emit(engine, MI_DISPLAY_FLIP |
  9322. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9323. intel_ring_emit(engine, fb->pitches[0]);
  9324. intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
  9325. obj->tiling_mode);
  9326. /* XXX Enabling the panel-fitter across page-flip is so far
  9327. * untested on non-native modes, so ignore it for now.
  9328. * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
  9329. */
  9330. pf = 0;
  9331. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9332. intel_ring_emit(engine, pf | pipesrc);
  9333. intel_mark_page_flip_active(intel_crtc->unpin_work);
  9334. return 0;
  9335. }
  9336. static int intel_gen6_queue_flip(struct drm_device *dev,
  9337. struct drm_crtc *crtc,
  9338. struct drm_framebuffer *fb,
  9339. struct drm_i915_gem_object *obj,
  9340. struct drm_i915_gem_request *req,
  9341. uint32_t flags)
  9342. {
  9343. struct intel_engine_cs *engine = req->engine;
  9344. struct drm_i915_private *dev_priv = dev->dev_private;
  9345. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9346. uint32_t pf, pipesrc;
  9347. int ret;
  9348. ret = intel_ring_begin(req, 4);
  9349. if (ret)
  9350. return ret;
  9351. intel_ring_emit(engine, MI_DISPLAY_FLIP |
  9352. MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
  9353. intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
  9354. intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
  9355. /* Contrary to the suggestions in the documentation,
  9356. * "Enable Panel Fitter" does not seem to be required when page
  9357. * flipping with a non-native mode, and worse causes a normal
  9358. * modeset to fail.
  9359. * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
  9360. */
  9361. pf = 0;
  9362. pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
  9363. intel_ring_emit(engine, pf | pipesrc);
  9364. intel_mark_page_flip_active(intel_crtc->unpin_work);
  9365. return 0;
  9366. }
  9367. static int intel_gen7_queue_flip(struct drm_device *dev,
  9368. struct drm_crtc *crtc,
  9369. struct drm_framebuffer *fb,
  9370. struct drm_i915_gem_object *obj,
  9371. struct drm_i915_gem_request *req,
  9372. uint32_t flags)
  9373. {
  9374. struct intel_engine_cs *engine = req->engine;
  9375. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9376. uint32_t plane_bit = 0;
  9377. int len, ret;
  9378. switch (intel_crtc->plane) {
  9379. case PLANE_A:
  9380. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
  9381. break;
  9382. case PLANE_B:
  9383. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
  9384. break;
  9385. case PLANE_C:
  9386. plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
  9387. break;
  9388. default:
  9389. WARN_ONCE(1, "unknown plane in flip command\n");
  9390. return -ENODEV;
  9391. }
  9392. len = 4;
  9393. if (engine->id == RCS) {
  9394. len += 6;
  9395. /*
9396. * On Gen 8, SRM takes an extra dword to accommodate
9397. * 48-bit addresses, and we need a NOOP for the batch size to
  9398. * stay even.
  9399. */
  9400. if (IS_GEN8(dev))
  9401. len += 2;
  9402. }
  9403. /*
  9404. * BSpec MI_DISPLAY_FLIP for IVB:
  9405. * "The full packet must be contained within the same cache line."
  9406. *
  9407. * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
  9408. * cacheline, if we ever start emitting more commands before
  9409. * the MI_DISPLAY_FLIP we may need to first emit everything else,
  9410. * then do the cacheline alignment, and finally emit the
  9411. * MI_DISPLAY_FLIP.
  9412. */
  9413. ret = intel_ring_cacheline_align(req);
  9414. if (ret)
  9415. return ret;
  9416. ret = intel_ring_begin(req, len);
  9417. if (ret)
  9418. return ret;
  9419. /* Unmask the flip-done completion message. Note that the bspec says that
  9420. * we should do this for both the BCS and RCS, and that we must not unmask
  9421. * more than one flip event at any time (or ensure that one flip message
  9422. * can be sent by waiting for flip-done prior to queueing new flips).
  9423. * Experimentation says that BCS works despite DERRMR masking all
  9424. * flip-done completion events and that unmasking all planes at once
  9425. * for the RCS also doesn't appear to drop events. Setting the DERRMR
  9426. * to zero does lead to lockups within MI_DISPLAY_FLIP.
  9427. */
  9428. if (engine->id == RCS) {
  9429. intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
  9430. intel_ring_emit_reg(engine, DERRMR);
  9431. intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
  9432. DERRMR_PIPEB_PRI_FLIP_DONE |
  9433. DERRMR_PIPEC_PRI_FLIP_DONE));
  9434. if (IS_GEN8(dev))
  9435. intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
  9436. MI_SRM_LRM_GLOBAL_GTT);
  9437. else
  9438. intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
  9439. MI_SRM_LRM_GLOBAL_GTT);
  9440. intel_ring_emit_reg(engine, DERRMR);
  9441. intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
  9442. if (IS_GEN8(dev)) {
  9443. intel_ring_emit(engine, 0);
  9444. intel_ring_emit(engine, MI_NOOP);
  9445. }
  9446. }
  9447. intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
  9448. intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
  9449. intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
  9450. intel_ring_emit(engine, (MI_NOOP));
  9451. intel_mark_page_flip_active(intel_crtc->unpin_work);
  9452. return 0;
  9453. }
  9454. static bool use_mmio_flip(struct intel_engine_cs *engine,
  9455. struct drm_i915_gem_object *obj)
  9456. {
  9457. /*
9458. * MMIO flips are not used on older platforms, because the
9459. * lack of a flip done interrupt forces us to use
  9460. * CS flips. Older platforms derive flip done using some clever
  9461. * tricks involving the flip_pending status bits and vblank irqs.
  9462. * So using MMIO flips there would disrupt this mechanism.
  9463. */
  9464. if (engine == NULL)
  9465. return true;
  9466. if (INTEL_INFO(engine->dev)->gen < 5)
  9467. return false;
  9468. if (i915.use_mmio_flip < 0)
  9469. return false;
  9470. else if (i915.use_mmio_flip > 0)
  9471. return true;
  9472. else if (i915.enable_execlists)
  9473. return true;
  9474. else if (obj->base.dma_buf &&
  9475. !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
  9476. false))
  9477. return true;
  9478. else
  9479. return engine != i915_gem_request_get_engine(obj->last_write_req);
  9480. }
  9481. static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
  9482. unsigned int rotation,
  9483. struct intel_unpin_work *work)
  9484. {
  9485. struct drm_device *dev = intel_crtc->base.dev;
  9486. struct drm_i915_private *dev_priv = dev->dev_private;
  9487. struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
  9488. const enum pipe pipe = intel_crtc->pipe;
  9489. u32 ctl, stride, tile_height;
  9490. ctl = I915_READ(PLANE_CTL(pipe, 0));
  9491. ctl &= ~PLANE_CTL_TILED_MASK;
  9492. switch (fb->modifier[0]) {
  9493. case DRM_FORMAT_MOD_NONE:
  9494. break;
  9495. case I915_FORMAT_MOD_X_TILED:
  9496. ctl |= PLANE_CTL_TILED_X;
  9497. break;
  9498. case I915_FORMAT_MOD_Y_TILED:
  9499. ctl |= PLANE_CTL_TILED_Y;
  9500. break;
  9501. case I915_FORMAT_MOD_Yf_TILED:
  9502. ctl |= PLANE_CTL_TILED_YF;
  9503. break;
  9504. default:
  9505. MISSING_CASE(fb->modifier[0]);
  9506. }
  9507. /*
9508. * The stride is either expressed as a multiple of 64-byte chunks for
  9509. * linear buffers or in number of tiles for tiled buffers.
  9510. */
  9511. if (intel_rotation_90_or_270(rotation)) {
  9512. /* stride = Surface height in tiles */
  9513. tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
  9514. stride = DIV_ROUND_UP(fb->height, tile_height);
  9515. } else {
  9516. stride = fb->pitches[0] /
  9517. intel_fb_stride_alignment(dev_priv, fb->modifier[0],
  9518. fb->pixel_format);
  9519. }
  9520. /*
  9521. * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
9522. * PLANE_SURF updates; the update is then guaranteed to be atomic.
  9523. */
  9524. I915_WRITE(PLANE_CTL(pipe, 0), ctl);
  9525. I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
  9526. I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
  9527. POSTING_READ(PLANE_SURF(pipe, 0));
  9528. }
  9529. static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
  9530. struct intel_unpin_work *work)
  9531. {
  9532. struct drm_device *dev = intel_crtc->base.dev;
  9533. struct drm_i915_private *dev_priv = dev->dev_private;
  9534. struct intel_framebuffer *intel_fb =
  9535. to_intel_framebuffer(intel_crtc->base.primary->fb);
  9536. struct drm_i915_gem_object *obj = intel_fb->obj;
  9537. i915_reg_t reg = DSPCNTR(intel_crtc->plane);
  9538. u32 dspcntr;
  9539. dspcntr = I915_READ(reg);
  9540. if (obj->tiling_mode != I915_TILING_NONE)
  9541. dspcntr |= DISPPLANE_TILED;
  9542. else
  9543. dspcntr &= ~DISPPLANE_TILED;
  9544. I915_WRITE(reg, dspcntr);
  9545. I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
  9546. POSTING_READ(DSPSURF(intel_crtc->plane));
  9547. }
  9548. /*
  9549. * XXX: This is the temporary way to update the plane registers until we get
  9550. * around to using the usual plane update functions for MMIO flips
  9551. */
  9552. static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
  9553. {
  9554. struct intel_crtc *crtc = mmio_flip->crtc;
  9555. struct intel_unpin_work *work;
  9556. spin_lock_irq(&crtc->base.dev->event_lock);
  9557. work = crtc->unpin_work;
  9558. spin_unlock_irq(&crtc->base.dev->event_lock);
  9559. if (work == NULL)
  9560. return;
  9561. intel_mark_page_flip_active(work);
  9562. intel_pipe_update_start(crtc);
  9563. if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
  9564. skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
  9565. else
9566. /* use_mmio_flip() restricts MMIO flips to ilk+ */
  9567. ilk_do_mmio_flip(crtc, work);
  9568. intel_pipe_update_end(crtc);
  9569. }
  9570. static void intel_mmio_flip_work_func(struct work_struct *work)
  9571. {
  9572. struct intel_mmio_flip *mmio_flip =
  9573. container_of(work, struct intel_mmio_flip, work);
  9574. struct intel_framebuffer *intel_fb =
  9575. to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
  9576. struct drm_i915_gem_object *obj = intel_fb->obj;
  9577. if (mmio_flip->req) {
  9578. WARN_ON(__i915_wait_request(mmio_flip->req,
  9579. false, NULL,
  9580. &mmio_flip->i915->rps.mmioflips));
  9581. i915_gem_request_unreference__unlocked(mmio_flip->req);
  9582. }
  9583. /* For framebuffer backed by dmabuf, wait for fence */
  9584. if (obj->base.dma_buf)
  9585. WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
  9586. false, false,
  9587. MAX_SCHEDULE_TIMEOUT) < 0);
  9588. intel_do_mmio_flip(mmio_flip);
  9589. kfree(mmio_flip);
  9590. }
  9591. static int intel_queue_mmio_flip(struct drm_device *dev,
  9592. struct drm_crtc *crtc,
  9593. struct drm_i915_gem_object *obj)
  9594. {
  9595. struct intel_mmio_flip *mmio_flip;
  9596. mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
  9597. if (mmio_flip == NULL)
  9598. return -ENOMEM;
  9599. mmio_flip->i915 = to_i915(dev);
  9600. mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
  9601. mmio_flip->crtc = to_intel_crtc(crtc);
  9602. mmio_flip->rotation = crtc->primary->state->rotation;
  9603. INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
  9604. schedule_work(&mmio_flip->work);
  9605. return 0;
  9606. }
  9607. static int intel_default_queue_flip(struct drm_device *dev,
  9608. struct drm_crtc *crtc,
  9609. struct drm_framebuffer *fb,
  9610. struct drm_i915_gem_object *obj,
  9611. struct drm_i915_gem_request *req,
  9612. uint32_t flags)
  9613. {
  9614. return -ENODEV;
  9615. }
  9616. static bool __intel_pageflip_stall_check(struct drm_device *dev,
  9617. struct drm_crtc *crtc)
  9618. {
  9619. struct drm_i915_private *dev_priv = dev->dev_private;
  9620. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9621. struct intel_unpin_work *work = intel_crtc->unpin_work;
  9622. u32 addr;
  9623. if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
  9624. return true;
  9625. if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
  9626. return false;
  9627. if (!work->enable_stall_check)
  9628. return false;
  9629. if (work->flip_ready_vblank == 0) {
  9630. if (work->flip_queued_req &&
  9631. !i915_gem_request_completed(work->flip_queued_req, true))
  9632. return false;
  9633. work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
  9634. }
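/* Only treat the flip as stuck once it has been ready for at least three vblanks. */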
  9635. if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
  9636. return false;
  9637. /* Potential stall - if we see that the flip has happened,
  9638. * assume a missed interrupt. */
  9639. if (INTEL_INFO(dev)->gen >= 4)
  9640. addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
  9641. else
  9642. addr = I915_READ(DSPADDR(intel_crtc->plane));
  9643. /* There is a potential issue here with a false positive after a flip
  9644. * to the same address. We could address this by checking for a
  9645. * non-incrementing frame counter.
  9646. */
  9647. return addr == work->gtt_offset;
  9648. }
  9649. void intel_check_page_flip(struct drm_device *dev, int pipe)
  9650. {
  9651. struct drm_i915_private *dev_priv = dev->dev_private;
  9652. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  9653. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9654. struct intel_unpin_work *work;
  9655. WARN_ON(!in_interrupt());
  9656. if (crtc == NULL)
  9657. return;
  9658. spin_lock(&dev->event_lock);
  9659. work = intel_crtc->unpin_work;
  9660. if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
  9661. WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
  9662. work->flip_queued_vblank, drm_vblank_count(dev, pipe));
  9663. page_flip_completed(intel_crtc);
  9664. work = NULL;
  9665. }
  9666. if (work != NULL &&
  9667. drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
  9668. intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
  9669. spin_unlock(&dev->event_lock);
  9670. }
  9671. static int intel_crtc_page_flip(struct drm_crtc *crtc,
  9672. struct drm_framebuffer *fb,
  9673. struct drm_pending_vblank_event *event,
  9674. uint32_t page_flip_flags)
  9675. {
  9676. struct drm_device *dev = crtc->dev;
  9677. struct drm_i915_private *dev_priv = dev->dev_private;
  9678. struct drm_framebuffer *old_fb = crtc->primary->fb;
  9679. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  9680. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9681. struct drm_plane *primary = crtc->primary;
  9682. enum pipe pipe = intel_crtc->pipe;
  9683. struct intel_unpin_work *work;
  9684. struct intel_engine_cs *engine;
  9685. bool mmio_flip;
  9686. struct drm_i915_gem_request *request = NULL;
  9687. int ret;
  9688. /*
  9689. * drm_mode_page_flip_ioctl() should already catch this, but double
  9690. * check to be safe. In the future we may enable pageflipping from
  9691. * a disabled primary plane.
  9692. */
  9693. if (WARN_ON(intel_fb_obj(old_fb) == NULL))
  9694. return -EBUSY;
  9695. /* Can't change pixel format via MI display flips. */
  9696. if (fb->pixel_format != crtc->primary->fb->pixel_format)
  9697. return -EINVAL;
  9698. /*
  9699. * TILEOFF/LINOFF registers can't be changed via MI display flips.
9700. * Note that pitch changes could also affect these registers.
  9701. */
  9702. if (INTEL_INFO(dev)->gen > 3 &&
  9703. (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
  9704. fb->pitches[0] != crtc->primary->fb->pitches[0]))
  9705. return -EINVAL;
  9706. if (i915_terminally_wedged(&dev_priv->gpu_error))
  9707. goto out_hang;
  9708. work = kzalloc(sizeof(*work), GFP_KERNEL);
  9709. if (work == NULL)
  9710. return -ENOMEM;
  9711. work->event = event;
  9712. work->crtc = crtc;
  9713. work->old_fb = old_fb;
  9714. INIT_WORK(&work->work, intel_unpin_work_fn);
  9715. ret = drm_crtc_vblank_get(crtc);
  9716. if (ret)
  9717. goto free_work;
  9718. /* We borrow the event spin lock for protecting unpin_work */
  9719. spin_lock_irq(&dev->event_lock);
  9720. if (intel_crtc->unpin_work) {
  9721. /* Before declaring the flip queue wedged, check if
  9722. * the hardware completed the operation behind our backs.
  9723. */
  9724. if (__intel_pageflip_stall_check(dev, crtc)) {
  9725. DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
  9726. page_flip_completed(intel_crtc);
  9727. } else {
  9728. DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
  9729. spin_unlock_irq(&dev->event_lock);
  9730. drm_crtc_vblank_put(crtc);
  9731. kfree(work);
  9732. return -EBUSY;
  9733. }
  9734. }
  9735. intel_crtc->unpin_work = work;
  9736. spin_unlock_irq(&dev->event_lock);
  9737. if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
  9738. flush_workqueue(dev_priv->wq);
  9739. /* Reference the objects for the scheduled work. */
  9740. drm_framebuffer_reference(work->old_fb);
  9741. drm_gem_object_reference(&obj->base);
  9742. crtc->primary->fb = fb;
  9743. update_state_fb(crtc->primary);
  9744. intel_fbc_pre_update(intel_crtc);
  9745. work->pending_flip_obj = obj;
  9746. ret = i915_mutex_lock_interruptible(dev);
  9747. if (ret)
  9748. goto cleanup;
  9749. intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
  9750. if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
  9751. ret = -EIO;
  9752. goto cleanup;
  9753. }
  9754. atomic_inc(&intel_crtc->unpin_work_count);
  9755. if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
  9756. work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
  9757. if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  9758. engine = &dev_priv->engine[BCS];
  9759. if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
  9760. /* vlv: DISPLAY_FLIP fails to change tiling */
  9761. engine = NULL;
  9762. } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
  9763. engine = &dev_priv->engine[BCS];
  9764. } else if (INTEL_INFO(dev)->gen >= 7) {
  9765. engine = i915_gem_request_get_engine(obj->last_write_req);
  9766. if (engine == NULL || engine->id != RCS)
  9767. engine = &dev_priv->engine[BCS];
  9768. } else {
  9769. engine = &dev_priv->engine[RCS];
  9770. }
  9771. mmio_flip = use_mmio_flip(engine, obj);
  9772. /* When using CS flips, we want to emit semaphores between rings.
  9773. * However, when using mmio flips we will create a task to do the
  9774. * synchronisation, so all we want here is to pin the framebuffer
  9775. * into the display plane and skip any waits.
  9776. */
  9777. if (!mmio_flip) {
  9778. ret = i915_gem_object_sync(obj, engine, &request);
  9779. if (ret)
  9780. goto cleanup_pending;
  9781. }
  9782. ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
  9783. if (ret)
  9784. goto cleanup_pending;
  9785. work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
  9786. obj, 0);
  9787. work->gtt_offset += intel_crtc->dspaddr_offset;
  9788. if (mmio_flip) {
  9789. ret = intel_queue_mmio_flip(dev, crtc, obj);
  9790. if (ret)
  9791. goto cleanup_unpin;
  9792. i915_gem_request_assign(&work->flip_queued_req,
  9793. obj->last_write_req);
  9794. } else {
  9795. if (!request) {
  9796. request = i915_gem_request_alloc(engine, NULL);
  9797. if (IS_ERR(request)) {
  9798. ret = PTR_ERR(request);
  9799. goto cleanup_unpin;
  9800. }
  9801. }
  9802. ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
  9803. page_flip_flags);
  9804. if (ret)
  9805. goto cleanup_unpin;
  9806. i915_gem_request_assign(&work->flip_queued_req, request);
  9807. }
  9808. if (request)
  9809. i915_add_request_no_flush(request);
  9810. work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
  9811. work->enable_stall_check = true;
  9812. i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
  9813. to_intel_plane(primary)->frontbuffer_bit);
  9814. mutex_unlock(&dev->struct_mutex);
  9815. intel_frontbuffer_flip_prepare(dev,
  9816. to_intel_plane(primary)->frontbuffer_bit);
  9817. trace_i915_flip_request(intel_crtc->plane, obj);
  9818. return 0;
  9819. cleanup_unpin:
  9820. intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
  9821. cleanup_pending:
  9822. if (!IS_ERR_OR_NULL(request))
  9823. i915_add_request_no_flush(request);
  9824. atomic_dec(&intel_crtc->unpin_work_count);
  9825. mutex_unlock(&dev->struct_mutex);
  9826. cleanup:
  9827. crtc->primary->fb = old_fb;
  9828. update_state_fb(crtc->primary);
  9829. drm_gem_object_unreference_unlocked(&obj->base);
  9830. drm_framebuffer_unreference(work->old_fb);
  9831. spin_lock_irq(&dev->event_lock);
  9832. intel_crtc->unpin_work = NULL;
  9833. spin_unlock_irq(&dev->event_lock);
  9834. drm_crtc_vblank_put(crtc);
  9835. free_work:
  9836. kfree(work);
  9837. if (ret == -EIO) {
  9838. struct drm_atomic_state *state;
  9839. struct drm_plane_state *plane_state;
  9840. out_hang:
  9841. state = drm_atomic_state_alloc(dev);
  9842. if (!state)
  9843. return -ENOMEM;
  9844. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  9845. retry:
  9846. plane_state = drm_atomic_get_plane_state(state, primary);
  9847. ret = PTR_ERR_OR_ZERO(plane_state);
  9848. if (!ret) {
  9849. drm_atomic_set_fb_for_plane(plane_state, fb);
  9850. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  9851. if (!ret)
  9852. ret = drm_atomic_commit(state);
  9853. }
  9854. if (ret == -EDEADLK) {
  9855. drm_modeset_backoff(state->acquire_ctx);
  9856. drm_atomic_state_clear(state);
  9857. goto retry;
  9858. }
  9859. if (ret)
  9860. drm_atomic_state_free(state);
  9861. if (ret == 0 && event) {
  9862. spin_lock_irq(&dev->event_lock);
  9863. drm_crtc_send_vblank_event(crtc, event);
  9864. spin_unlock_irq(&dev->event_lock);
  9865. }
  9866. }
  9867. return ret;
  9868. }
  9869. /**
  9870. * intel_wm_need_update - Check whether watermarks need updating
  9871. * @plane: drm plane
  9872. * @state: new plane state
  9873. *
  9874. * Check current plane state versus the new one to determine whether
  9875. * watermarks need to be recalculated.
  9876. *
  9877. * Returns true or false.
  9878. */
  9879. static bool intel_wm_need_update(struct drm_plane *plane,
  9880. struct drm_plane_state *state)
  9881. {
  9882. struct intel_plane_state *new = to_intel_plane_state(state);
  9883. struct intel_plane_state *cur = to_intel_plane_state(plane->state);
9884. /* Update watermarks on tiling, rotation or size changes. */
  9885. if (new->visible != cur->visible)
  9886. return true;
  9887. if (!cur->base.fb || !new->base.fb)
  9888. return false;
  9889. if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
  9890. cur->base.rotation != new->base.rotation ||
  9891. drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
  9892. drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
  9893. drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
  9894. drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
  9895. return true;
  9896. return false;
  9897. }
  9898. static bool needs_scaling(struct intel_plane_state *state)
  9899. {
  9900. int src_w = drm_rect_width(&state->src) >> 16;
  9901. int src_h = drm_rect_height(&state->src) >> 16;
  9902. int dst_w = drm_rect_width(&state->dst);
  9903. int dst_h = drm_rect_height(&state->dst);
  9904. return (src_w != dst_w || src_h != dst_h);
  9905. }
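/*
 * Note on units (editorial): plane source rectangles are kept in 16.16
 * fixed point while destination rectangles are in whole pixels, hence the
 * ">> 16" above. For example, a 1920 pixel wide source is stored as
 * 1920 << 16 = 125829120, and drm_rect_width(&state->src) >> 16 recovers
 * 1920; scaling is needed whenever that value differs from the integer
 * destination width or height.
 */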
  9906. int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
  9907. struct drm_plane_state *plane_state)
  9908. {
  9909. struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
  9910. struct drm_crtc *crtc = crtc_state->crtc;
  9911. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  9912. struct drm_plane *plane = plane_state->plane;
  9913. struct drm_device *dev = crtc->dev;
  9914. struct drm_i915_private *dev_priv = to_i915(dev);
  9915. struct intel_plane_state *old_plane_state =
  9916. to_intel_plane_state(plane->state);
  9917. int idx = intel_crtc->base.base.id, ret;
  9918. bool mode_changed = needs_modeset(crtc_state);
  9919. bool was_crtc_enabled = crtc->state->active;
  9920. bool is_crtc_enabled = crtc_state->active;
  9921. bool turn_off, turn_on, visible, was_visible;
  9922. struct drm_framebuffer *fb = plane_state->fb;
  9923. if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
  9924. plane->type != DRM_PLANE_TYPE_CURSOR) {
  9925. ret = skl_update_scaler_plane(
  9926. to_intel_crtc_state(crtc_state),
  9927. to_intel_plane_state(plane_state));
  9928. if (ret)
  9929. return ret;
  9930. }
  9931. was_visible = old_plane_state->visible;
  9932. visible = to_intel_plane_state(plane_state)->visible;
  9933. if (!was_crtc_enabled && WARN_ON(was_visible))
  9934. was_visible = false;
  9935. /*
  9936. * Visibility is calculated as if the crtc was on, but
  9937. * after scaler setup everything depends on it being off
  9938. * when the crtc isn't active.
  9939. */
  9940. if (!is_crtc_enabled)
  9941. to_intel_plane_state(plane_state)->visible = visible = false;
  9942. if (!was_visible && !visible)
  9943. return 0;
  9944. if (fb != old_plane_state->base.fb)
  9945. pipe_config->fb_changed = true;
  9946. turn_off = was_visible && (!visible || mode_changed);
  9947. turn_on = visible && (!was_visible || mode_changed);
  9948. DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
  9949. plane->base.id, fb ? fb->base.id : -1);
  9950. DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
  9951. plane->base.id, was_visible, visible,
  9952. turn_off, turn_on, mode_changed);
  9953. if (turn_on) {
  9954. pipe_config->update_wm_pre = true;
  9955. /* must disable cxsr around plane enable/disable */
  9956. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  9957. pipe_config->disable_cxsr = true;
  9958. } else if (turn_off) {
  9959. pipe_config->update_wm_post = true;
  9960. /* must disable cxsr around plane enable/disable */
  9961. if (plane->type != DRM_PLANE_TYPE_CURSOR)
  9962. pipe_config->disable_cxsr = true;
  9963. } else if (intel_wm_need_update(plane, plane_state)) {
  9964. /* FIXME bollocks */
  9965. pipe_config->update_wm_pre = true;
  9966. pipe_config->update_wm_post = true;
  9967. }
  9968. /* Pre-gen9 platforms need two-step watermark updates */
  9969. if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
  9970. INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
  9971. to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
  9972. if (visible || was_visible)
  9973. pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
  9974. /*
  9975. * WaCxSRDisabledForSpriteScaling:ivb
  9976. *
  9977. * cstate->update_wm was already set above, so this flag will
  9978. * take effect when we commit and program watermarks.
  9979. */
  9980. if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
  9981. needs_scaling(to_intel_plane_state(plane_state)) &&
  9982. !needs_scaling(old_plane_state))
  9983. pipe_config->disable_lp_wm = true;
  9984. return 0;
  9985. }
  9986. static bool encoders_cloneable(const struct intel_encoder *a,
  9987. const struct intel_encoder *b)
  9988. {
  9989. /* masks could be asymmetric, so check both ways */
  9990. return a == b || (a->cloneable & (1 << b->type) &&
  9991. b->cloneable & (1 << a->type));
  9992. }
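/*
 * Worked example (editorial): encoder types are used as bit positions in
 * the cloneable mask, and the test above is deliberately symmetric. With
 * hypothetical values
 *
 *	a->type = INTEL_OUTPUT_HDMI,   a->cloneable = 1 << INTEL_OUTPUT_ANALOG;
 *	b->type = INTEL_OUTPUT_ANALOG, b->cloneable = 1 << INTEL_OUTPUT_HDMI;
 *
 * a->cloneable has b's type bit set and b->cloneable has a's type bit set,
 * so the pair is cloneable; clearing either bit makes the check fail.
 */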
  9993. static bool check_single_encoder_cloning(struct drm_atomic_state *state,
  9994. struct intel_crtc *crtc,
  9995. struct intel_encoder *encoder)
  9996. {
  9997. struct intel_encoder *source_encoder;
  9998. struct drm_connector *connector;
  9999. struct drm_connector_state *connector_state;
  10000. int i;
  10001. for_each_connector_in_state(state, connector, connector_state, i) {
  10002. if (connector_state->crtc != &crtc->base)
  10003. continue;
  10004. source_encoder =
  10005. to_intel_encoder(connector_state->best_encoder);
  10006. if (!encoders_cloneable(encoder, source_encoder))
  10007. return false;
  10008. }
  10009. return true;
  10010. }
  10011. static bool check_encoder_cloning(struct drm_atomic_state *state,
  10012. struct intel_crtc *crtc)
  10013. {
  10014. struct intel_encoder *encoder;
  10015. struct drm_connector *connector;
  10016. struct drm_connector_state *connector_state;
  10017. int i;
  10018. for_each_connector_in_state(state, connector, connector_state, i) {
  10019. if (connector_state->crtc != &crtc->base)
  10020. continue;
  10021. encoder = to_intel_encoder(connector_state->best_encoder);
  10022. if (!check_single_encoder_cloning(state, crtc, encoder))
  10023. return false;
  10024. }
  10025. return true;
  10026. }
  10027. static int intel_crtc_atomic_check(struct drm_crtc *crtc,
  10028. struct drm_crtc_state *crtc_state)
  10029. {
  10030. struct drm_device *dev = crtc->dev;
  10031. struct drm_i915_private *dev_priv = dev->dev_private;
  10032. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10033. struct intel_crtc_state *pipe_config =
  10034. to_intel_crtc_state(crtc_state);
  10035. struct drm_atomic_state *state = crtc_state->state;
  10036. int ret;
  10037. bool mode_changed = needs_modeset(crtc_state);
  10038. if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
  10039. DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
  10040. return -EINVAL;
  10041. }
  10042. if (mode_changed && !crtc_state->active)
  10043. pipe_config->update_wm_post = true;
  10044. if (mode_changed && crtc_state->enable &&
  10045. dev_priv->display.crtc_compute_clock &&
  10046. !WARN_ON(pipe_config->shared_dpll)) {
  10047. ret = dev_priv->display.crtc_compute_clock(intel_crtc,
  10048. pipe_config);
  10049. if (ret)
  10050. return ret;
  10051. }
  10052. if (crtc_state->color_mgmt_changed) {
  10053. ret = intel_color_check(crtc, crtc_state);
  10054. if (ret)
  10055. return ret;
  10056. /*
  10057. * Changing color management on Intel hardware is
10058. * handled as part of the plane update.
  10059. */
  10060. crtc_state->planes_changed = true;
  10061. }
  10062. ret = 0;
  10063. if (dev_priv->display.compute_pipe_wm) {
  10064. ret = dev_priv->display.compute_pipe_wm(pipe_config);
  10065. if (ret) {
  10066. DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
  10067. return ret;
  10068. }
  10069. }
  10070. if (dev_priv->display.compute_intermediate_wm &&
  10071. !to_intel_atomic_state(state)->skip_intermediate_wm) {
  10072. if (WARN_ON(!dev_priv->display.compute_pipe_wm))
  10073. return 0;
  10074. /*
  10075. * Calculate 'intermediate' watermarks that satisfy both the
  10076. * old state and the new state. We can program these
  10077. * immediately.
  10078. */
  10079. ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
  10080. intel_crtc,
  10081. pipe_config);
  10082. if (ret) {
  10083. DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
  10084. return ret;
  10085. }
  10086. } else if (dev_priv->display.compute_intermediate_wm) {
  10087. if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
  10088. pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
  10089. }
  10090. if (INTEL_INFO(dev)->gen >= 9) {
  10091. if (mode_changed)
  10092. ret = skl_update_scaler_crtc(pipe_config);
  10093. if (!ret)
  10094. ret = intel_atomic_setup_scalers(dev, intel_crtc,
  10095. pipe_config);
  10096. }
  10097. return ret;
  10098. }
  10099. static const struct drm_crtc_helper_funcs intel_helper_funcs = {
  10100. .mode_set_base_atomic = intel_pipe_set_base_atomic,
  10101. .atomic_begin = intel_begin_crtc_commit,
  10102. .atomic_flush = intel_finish_crtc_commit,
  10103. .atomic_check = intel_crtc_atomic_check,
  10104. };
  10105. static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
  10106. {
  10107. struct intel_connector *connector;
  10108. for_each_intel_connector(dev, connector) {
  10109. if (connector->base.state->crtc)
  10110. drm_connector_unreference(&connector->base);
  10111. if (connector->base.encoder) {
  10112. connector->base.state->best_encoder =
  10113. connector->base.encoder;
  10114. connector->base.state->crtc =
  10115. connector->base.encoder->crtc;
  10116. drm_connector_reference(&connector->base);
  10117. } else {
  10118. connector->base.state->best_encoder = NULL;
  10119. connector->base.state->crtc = NULL;
  10120. }
  10121. }
  10122. }
  10123. static void
  10124. connected_sink_compute_bpp(struct intel_connector *connector,
  10125. struct intel_crtc_state *pipe_config)
  10126. {
  10127. int bpp = pipe_config->pipe_bpp;
  10128. DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
  10129. connector->base.base.id,
  10130. connector->base.name);
  10131. /* Don't use an invalid EDID bpc value */
  10132. if (connector->base.display_info.bpc &&
  10133. connector->base.display_info.bpc * 3 < bpp) {
  10134. DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
  10135. bpp, connector->base.display_info.bpc*3);
  10136. pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
  10137. }
  10138. /* Clamp bpp to default limit on screens without EDID 1.4 */
  10139. if (connector->base.display_info.bpc == 0) {
  10140. int type = connector->base.connector_type;
  10141. int clamp_bpp = 24;
  10142. /* Fall back to 18 bpp when DP sink capability is unknown. */
  10143. if (type == DRM_MODE_CONNECTOR_DisplayPort ||
  10144. type == DRM_MODE_CONNECTOR_eDP)
  10145. clamp_bpp = 18;
  10146. if (bpp > clamp_bpp) {
  10147. DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
  10148. bpp, clamp_bpp);
  10149. pipe_config->pipe_bpp = clamp_bpp;
  10150. }
  10151. }
  10152. }
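/*
 * Worked example (editorial): a panel reporting 6 bpc in its EDID allows
 * at most 6 * 3 = 18 bpp, so a pipe_bpp of 24 is clamped to 18 here. A
 * DP/eDP sink with an unknown bpc (0) is likewise clamped to 18 bpp,
 * while other connector types without usable EDID bpc data fall back to
 * the 24 bpp default.
 */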
  10153. static int
  10154. compute_baseline_pipe_bpp(struct intel_crtc *crtc,
  10155. struct intel_crtc_state *pipe_config)
  10156. {
  10157. struct drm_device *dev = crtc->base.dev;
  10158. struct drm_atomic_state *state;
  10159. struct drm_connector *connector;
  10160. struct drm_connector_state *connector_state;
  10161. int bpp, i;
  10162. if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
  10163. bpp = 10*3;
  10164. else if (INTEL_INFO(dev)->gen >= 5)
  10165. bpp = 12*3;
  10166. else
  10167. bpp = 8*3;
  10168. pipe_config->pipe_bpp = bpp;
  10169. state = pipe_config->base.state;
  10170. /* Clamp display bpp to EDID value */
  10171. for_each_connector_in_state(state, connector, connector_state, i) {
  10172. if (connector_state->crtc != &crtc->base)
  10173. continue;
  10174. connected_sink_compute_bpp(to_intel_connector(connector),
  10175. pipe_config);
  10176. }
  10177. return bpp;
  10178. }
  10179. static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
  10180. {
  10181. DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
  10182. "type: 0x%x flags: 0x%x\n",
  10183. mode->crtc_clock,
  10184. mode->crtc_hdisplay, mode->crtc_hsync_start,
  10185. mode->crtc_hsync_end, mode->crtc_htotal,
  10186. mode->crtc_vdisplay, mode->crtc_vsync_start,
  10187. mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
  10188. }
  10189. static void intel_dump_pipe_config(struct intel_crtc *crtc,
  10190. struct intel_crtc_state *pipe_config,
  10191. const char *context)
  10192. {
  10193. struct drm_device *dev = crtc->base.dev;
  10194. struct drm_plane *plane;
  10195. struct intel_plane *intel_plane;
  10196. struct intel_plane_state *state;
  10197. struct drm_framebuffer *fb;
  10198. DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
  10199. context, pipe_config, pipe_name(crtc->pipe));
  10200. DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
  10201. DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
  10202. pipe_config->pipe_bpp, pipe_config->dither);
  10203. DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
  10204. pipe_config->has_pch_encoder,
  10205. pipe_config->fdi_lanes,
  10206. pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
  10207. pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
  10208. pipe_config->fdi_m_n.tu);
  10209. DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
  10210. pipe_config->has_dp_encoder,
  10211. pipe_config->lane_count,
  10212. pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
  10213. pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
  10214. pipe_config->dp_m_n.tu);
  10215. DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
  10216. pipe_config->has_dp_encoder,
  10217. pipe_config->lane_count,
  10218. pipe_config->dp_m2_n2.gmch_m,
  10219. pipe_config->dp_m2_n2.gmch_n,
  10220. pipe_config->dp_m2_n2.link_m,
  10221. pipe_config->dp_m2_n2.link_n,
  10222. pipe_config->dp_m2_n2.tu);
  10223. DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
  10224. pipe_config->has_audio,
  10225. pipe_config->has_infoframe);
  10226. DRM_DEBUG_KMS("requested mode:\n");
  10227. drm_mode_debug_printmodeline(&pipe_config->base.mode);
  10228. DRM_DEBUG_KMS("adjusted mode:\n");
  10229. drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
  10230. intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
  10231. DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
  10232. DRM_DEBUG_KMS("pipe src size: %dx%d\n",
  10233. pipe_config->pipe_src_w, pipe_config->pipe_src_h);
  10234. DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
  10235. crtc->num_scalers,
  10236. pipe_config->scaler_state.scaler_users,
  10237. pipe_config->scaler_state.scaler_id);
  10238. DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
  10239. pipe_config->gmch_pfit.control,
  10240. pipe_config->gmch_pfit.pgm_ratios,
  10241. pipe_config->gmch_pfit.lvds_border_bits);
  10242. DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
  10243. pipe_config->pch_pfit.pos,
  10244. pipe_config->pch_pfit.size,
  10245. pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
  10246. DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
  10247. DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
  10248. if (IS_BROXTON(dev)) {
  10249. DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
  10250. "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
  10251. "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
  10252. pipe_config->ddi_pll_sel,
  10253. pipe_config->dpll_hw_state.ebb0,
  10254. pipe_config->dpll_hw_state.ebb4,
  10255. pipe_config->dpll_hw_state.pll0,
  10256. pipe_config->dpll_hw_state.pll1,
  10257. pipe_config->dpll_hw_state.pll2,
  10258. pipe_config->dpll_hw_state.pll3,
  10259. pipe_config->dpll_hw_state.pll6,
  10260. pipe_config->dpll_hw_state.pll8,
  10261. pipe_config->dpll_hw_state.pll9,
  10262. pipe_config->dpll_hw_state.pll10,
  10263. pipe_config->dpll_hw_state.pcsdw12);
  10264. } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
  10265. DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
  10266. "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
  10267. pipe_config->ddi_pll_sel,
  10268. pipe_config->dpll_hw_state.ctrl1,
  10269. pipe_config->dpll_hw_state.cfgcr1,
  10270. pipe_config->dpll_hw_state.cfgcr2);
  10271. } else if (HAS_DDI(dev)) {
  10272. DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
  10273. pipe_config->ddi_pll_sel,
  10274. pipe_config->dpll_hw_state.wrpll,
  10275. pipe_config->dpll_hw_state.spll);
  10276. } else {
  10277. DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
  10278. "fp0: 0x%x, fp1: 0x%x\n",
  10279. pipe_config->dpll_hw_state.dpll,
  10280. pipe_config->dpll_hw_state.dpll_md,
  10281. pipe_config->dpll_hw_state.fp0,
  10282. pipe_config->dpll_hw_state.fp1);
  10283. }
  10284. DRM_DEBUG_KMS("planes on this crtc\n");
  10285. list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
  10286. intel_plane = to_intel_plane(plane);
  10287. if (intel_plane->pipe != crtc->pipe)
  10288. continue;
  10289. state = to_intel_plane_state(plane->state);
  10290. fb = state->base.fb;
  10291. if (!fb) {
  10292. DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
  10293. "disabled, scaler_id = %d\n",
  10294. plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
  10295. plane->base.id, intel_plane->pipe,
  10296. (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
  10297. drm_plane_index(plane), state->scaler_id);
  10298. continue;
  10299. }
  10300. DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
  10301. plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
  10302. plane->base.id, intel_plane->pipe,
  10303. crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
  10304. drm_plane_index(plane));
  10305. DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
  10306. fb->base.id, fb->width, fb->height, fb->pixel_format);
  10307. DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
  10308. state->scaler_id,
  10309. state->src.x1 >> 16, state->src.y1 >> 16,
  10310. drm_rect_width(&state->src) >> 16,
  10311. drm_rect_height(&state->src) >> 16,
  10312. state->dst.x1, state->dst.y1,
  10313. drm_rect_width(&state->dst), drm_rect_height(&state->dst));
  10314. }
  10315. }
  10316. static bool check_digital_port_conflicts(struct drm_atomic_state *state)
  10317. {
  10318. struct drm_device *dev = state->dev;
  10319. struct drm_connector *connector;
  10320. unsigned int used_ports = 0;
  10321. /*
  10322. * Walk the connector list instead of the encoder
  10323. * list to detect the problem on ddi platforms
  10324. * where there's just one encoder per digital port.
  10325. */
  10326. drm_for_each_connector(connector, dev) {
  10327. struct drm_connector_state *connector_state;
  10328. struct intel_encoder *encoder;
  10329. connector_state = drm_atomic_get_existing_connector_state(state, connector);
  10330. if (!connector_state)
  10331. connector_state = connector->state;
  10332. if (!connector_state->best_encoder)
  10333. continue;
  10334. encoder = to_intel_encoder(connector_state->best_encoder);
  10335. WARN_ON(!connector_state->crtc);
  10336. switch (encoder->type) {
  10337. unsigned int port_mask;
  10338. case INTEL_OUTPUT_UNKNOWN:
  10339. if (WARN_ON(!HAS_DDI(dev)))
  10340. break;
  10341. case INTEL_OUTPUT_DISPLAYPORT:
  10342. case INTEL_OUTPUT_HDMI:
  10343. case INTEL_OUTPUT_EDP:
  10344. port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
  10345. /* the same port mustn't appear more than once */
  10346. if (used_ports & port_mask)
  10347. return false;
  10348. used_ports |= port_mask;
  10349. default:
  10350. break;
  10351. }
  10352. }
  10353. return true;
  10354. }
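/*
 * Worked example (editorial): if two connectors in the state both end up
 * on digital port B, the first one sets used_ports |= (1 << PORT_B); when
 * the second one computes the same port_mask, (used_ports & port_mask) is
 * already non-zero and the configuration is rejected as conflicting.
 */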
  10355. static void
  10356. clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  10357. {
  10358. struct drm_crtc_state tmp_state;
  10359. struct intel_crtc_scaler_state scaler_state;
  10360. struct intel_dpll_hw_state dpll_hw_state;
  10361. struct intel_shared_dpll *shared_dpll;
  10362. uint32_t ddi_pll_sel;
  10363. bool force_thru;
  10364. /* FIXME: before the switch to atomic started, a new pipe_config was
  10365. * kzalloc'd. Code that depends on any field being zero should be
  10366. * fixed, so that the crtc_state can be safely duplicated. For now,
10367. * only fields that are known not to cause problems are preserved. */
  10368. tmp_state = crtc_state->base;
  10369. scaler_state = crtc_state->scaler_state;
  10370. shared_dpll = crtc_state->shared_dpll;
  10371. dpll_hw_state = crtc_state->dpll_hw_state;
  10372. ddi_pll_sel = crtc_state->ddi_pll_sel;
  10373. force_thru = crtc_state->pch_pfit.force_thru;
  10374. memset(crtc_state, 0, sizeof *crtc_state);
  10375. crtc_state->base = tmp_state;
  10376. crtc_state->scaler_state = scaler_state;
  10377. crtc_state->shared_dpll = shared_dpll;
  10378. crtc_state->dpll_hw_state = dpll_hw_state;
  10379. crtc_state->ddi_pll_sel = ddi_pll_sel;
  10380. crtc_state->pch_pfit.force_thru = force_thru;
  10381. }
  10382. static int
  10383. intel_modeset_pipe_config(struct drm_crtc *crtc,
  10384. struct intel_crtc_state *pipe_config)
  10385. {
  10386. struct drm_atomic_state *state = pipe_config->base.state;
  10387. struct intel_encoder *encoder;
  10388. struct drm_connector *connector;
  10389. struct drm_connector_state *connector_state;
  10390. int base_bpp, ret = -EINVAL;
  10391. int i;
  10392. bool retry = true;
  10393. clear_intel_crtc_state(pipe_config);
  10394. pipe_config->cpu_transcoder =
  10395. (enum transcoder) to_intel_crtc(crtc)->pipe;
  10396. /*
  10397. * Sanitize sync polarity flags based on requested ones. If neither
10398. * positive nor negative polarity is requested, treat this as meaning
  10399. * negative polarity.
  10400. */
  10401. if (!(pipe_config->base.adjusted_mode.flags &
  10402. (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
  10403. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
  10404. if (!(pipe_config->base.adjusted_mode.flags &
  10405. (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
  10406. pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
  10407. base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
  10408. pipe_config);
  10409. if (base_bpp < 0)
  10410. goto fail;
  10411. /*
  10412. * Determine the real pipe dimensions. Note that stereo modes can
  10413. * increase the actual pipe size due to the frame doubling and
10414. * insertion of additional space for blanks between the frames. This
  10415. * is stored in the crtc timings. We use the requested mode to do this
  10416. * computation to clearly distinguish it from the adjusted mode, which
  10417. * can be changed by the connectors in the below retry loop.
  10418. */
  10419. drm_crtc_get_hv_timing(&pipe_config->base.mode,
  10420. &pipe_config->pipe_src_w,
  10421. &pipe_config->pipe_src_h);
  10422. encoder_retry:
  10423. /* Ensure the port clock defaults are reset when retrying. */
  10424. pipe_config->port_clock = 0;
  10425. pipe_config->pixel_multiplier = 1;
  10426. /* Fill in default crtc timings, allow encoders to overwrite them. */
  10427. drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
  10428. CRTC_STEREO_DOUBLE);
  10429. /* Pass our mode to the connectors and the CRTC to give them a chance to
  10430. * adjust it according to limitations or connector properties, and also
  10431. * a chance to reject the mode entirely.
  10432. */
  10433. for_each_connector_in_state(state, connector, connector_state, i) {
  10434. if (connector_state->crtc != crtc)
  10435. continue;
  10436. encoder = to_intel_encoder(connector_state->best_encoder);
  10437. if (!(encoder->compute_config(encoder, pipe_config))) {
  10438. DRM_DEBUG_KMS("Encoder config failure\n");
  10439. goto fail;
  10440. }
  10441. }
  10442. /* Set default port clock if not overwritten by the encoder. Needs to be
  10443. * done afterwards in case the encoder adjusts the mode. */
  10444. if (!pipe_config->port_clock)
  10445. pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
  10446. * pipe_config->pixel_multiplier;
  10447. ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
  10448. if (ret < 0) {
  10449. DRM_DEBUG_KMS("CRTC fixup failed\n");
  10450. goto fail;
  10451. }
  10452. if (ret == RETRY) {
  10453. if (WARN(!retry, "loop in pipe configuration computation\n")) {
  10454. ret = -EINVAL;
  10455. goto fail;
  10456. }
  10457. DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
  10458. retry = false;
  10459. goto encoder_retry;
  10460. }
10461. /* Dithering does not seem to pass bits through correctly when it should, so
10462. * only enable it on 6bpc panels. */
  10463. pipe_config->dither = pipe_config->pipe_bpp == 6*3;
  10464. DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
  10465. base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  10466. fail:
  10467. return ret;
  10468. }
  10469. static void
  10470. intel_modeset_update_crtc_state(struct drm_atomic_state *state)
  10471. {
  10472. struct drm_crtc *crtc;
  10473. struct drm_crtc_state *crtc_state;
  10474. int i;
  10475. /* Double check state. */
  10476. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  10477. to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
  10478. /* Update hwmode for vblank functions */
  10479. if (crtc->state->active)
  10480. crtc->hwmode = crtc->state->adjusted_mode;
  10481. else
  10482. crtc->hwmode.crtc_clock = 0;
  10483. /*
  10484. * Update legacy state to satisfy fbc code. This can
  10485. * be removed when fbc uses the atomic state.
  10486. */
  10487. if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
  10488. struct drm_plane_state *plane_state = crtc->primary->state;
  10489. crtc->primary->fb = plane_state->fb;
  10490. crtc->x = plane_state->src_x >> 16;
  10491. crtc->y = plane_state->src_y >> 16;
  10492. }
  10493. }
  10494. }
  10495. static bool intel_fuzzy_clock_check(int clock1, int clock2)
  10496. {
  10497. int diff;
  10498. if (clock1 == clock2)
  10499. return true;
  10500. if (!clock1 || !clock2)
  10501. return false;
  10502. diff = abs(clock1 - clock2);
  10503. if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
  10504. return true;
  10505. return false;
  10506. }
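/*
 * Worked example (editorial): the expression above is equivalent to
 * 100 + (100 * diff) / (clock1 + clock2) < 105, i.e. the clocks match
 * when their difference is below 5% of their sum. For clock1 = 200000
 * and clock2 = 208000: diff = 8000 and (8000 + 408000) * 100 / 408000
 * evaluates to 101, so they are considered equal; for 200000 vs 222000
 * the expression evaluates to 105 and the check fails.
 */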
  10507. #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
  10508. list_for_each_entry((intel_crtc), \
  10509. &(dev)->mode_config.crtc_list, \
  10510. base.head) \
  10511. for_each_if (mask & (1 <<(intel_crtc)->pipe))
  10512. static bool
  10513. intel_compare_m_n(unsigned int m, unsigned int n,
  10514. unsigned int m2, unsigned int n2,
  10515. bool exact)
  10516. {
  10517. if (m == m2 && n == n2)
  10518. return true;
  10519. if (exact || !m || !n || !m2 || !n2)
  10520. return false;
  10521. BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
  10522. if (n > n2) {
  10523. while (n > n2) {
  10524. m2 <<= 1;
  10525. n2 <<= 1;
  10526. }
  10527. } else if (n < n2) {
  10528. while (n < n2) {
  10529. m <<= 1;
  10530. n <<= 1;
  10531. }
  10532. }
  10533. if (n != n2)
  10534. return false;
  10535. return intel_fuzzy_clock_check(m, m2);
  10536. }
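/*
 * Worked example (editorial): M/N pairs that encode the same ratio at
 * different scales compare equal in the !exact case. For (m, n) = (63, 32)
 * against (m2, n2) = (126, 64): n < n2, so m and n are doubled to 126/64,
 * the denominators now match, and intel_fuzzy_clock_check(126, 126)
 * reports a match. With exact == true only bit-identical pairs pass.
 */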
  10537. static bool
  10538. intel_compare_link_m_n(const struct intel_link_m_n *m_n,
  10539. struct intel_link_m_n *m2_n2,
  10540. bool adjust)
  10541. {
  10542. if (m_n->tu == m2_n2->tu &&
  10543. intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
  10544. m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
  10545. intel_compare_m_n(m_n->link_m, m_n->link_n,
  10546. m2_n2->link_m, m2_n2->link_n, !adjust)) {
  10547. if (adjust)
  10548. *m2_n2 = *m_n;
  10549. return true;
  10550. }
  10551. return false;
  10552. }
  10553. static bool
  10554. intel_pipe_config_compare(struct drm_device *dev,
  10555. struct intel_crtc_state *current_config,
  10556. struct intel_crtc_state *pipe_config,
  10557. bool adjust)
  10558. {
  10559. bool ret = true;
  10560. #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
  10561. do { \
  10562. if (!adjust) \
  10563. DRM_ERROR(fmt, ##__VA_ARGS__); \
  10564. else \
  10565. DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
  10566. } while (0)
  10567. #define PIPE_CONF_CHECK_X(name) \
  10568. if (current_config->name != pipe_config->name) { \
  10569. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10570. "(expected 0x%08x, found 0x%08x)\n", \
  10571. current_config->name, \
  10572. pipe_config->name); \
  10573. ret = false; \
  10574. }
  10575. #define PIPE_CONF_CHECK_I(name) \
  10576. if (current_config->name != pipe_config->name) { \
  10577. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10578. "(expected %i, found %i)\n", \
  10579. current_config->name, \
  10580. pipe_config->name); \
  10581. ret = false; \
  10582. }
  10583. #define PIPE_CONF_CHECK_P(name) \
  10584. if (current_config->name != pipe_config->name) { \
  10585. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10586. "(expected %p, found %p)\n", \
  10587. current_config->name, \
  10588. pipe_config->name); \
  10589. ret = false; \
  10590. }
  10591. #define PIPE_CONF_CHECK_M_N(name) \
  10592. if (!intel_compare_link_m_n(&current_config->name, \
  10593. &pipe_config->name,\
  10594. adjust)) { \
  10595. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10596. "(expected tu %i gmch %i/%i link %i/%i, " \
  10597. "found tu %i, gmch %i/%i link %i/%i)\n", \
  10598. current_config->name.tu, \
  10599. current_config->name.gmch_m, \
  10600. current_config->name.gmch_n, \
  10601. current_config->name.link_m, \
  10602. current_config->name.link_n, \
  10603. pipe_config->name.tu, \
  10604. pipe_config->name.gmch_m, \
  10605. pipe_config->name.gmch_n, \
  10606. pipe_config->name.link_m, \
  10607. pipe_config->name.link_n); \
  10608. ret = false; \
  10609. }
  10610. /* This is required for BDW+ where there is only one set of registers for
  10611. * switching between high and low RR.
  10612. * This macro can be used whenever a comparison has to be made between one
  10613. * hw state and multiple sw state variables.
  10614. */
  10615. #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
  10616. if (!intel_compare_link_m_n(&current_config->name, \
  10617. &pipe_config->name, adjust) && \
  10618. !intel_compare_link_m_n(&current_config->alt_name, \
  10619. &pipe_config->name, adjust)) { \
  10620. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10621. "(expected tu %i gmch %i/%i link %i/%i, " \
  10622. "or tu %i gmch %i/%i link %i/%i, " \
  10623. "found tu %i, gmch %i/%i link %i/%i)\n", \
  10624. current_config->name.tu, \
  10625. current_config->name.gmch_m, \
  10626. current_config->name.gmch_n, \
  10627. current_config->name.link_m, \
  10628. current_config->name.link_n, \
  10629. current_config->alt_name.tu, \
  10630. current_config->alt_name.gmch_m, \
  10631. current_config->alt_name.gmch_n, \
  10632. current_config->alt_name.link_m, \
  10633. current_config->alt_name.link_n, \
  10634. pipe_config->name.tu, \
  10635. pipe_config->name.gmch_m, \
  10636. pipe_config->name.gmch_n, \
  10637. pipe_config->name.link_m, \
  10638. pipe_config->name.link_n); \
  10639. ret = false; \
  10640. }
  10641. #define PIPE_CONF_CHECK_FLAGS(name, mask) \
  10642. if ((current_config->name ^ pipe_config->name) & (mask)) { \
  10643. INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
  10644. "(expected %i, found %i)\n", \
  10645. current_config->name & (mask), \
  10646. pipe_config->name & (mask)); \
  10647. ret = false; \
  10648. }
  10649. #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
  10650. if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
  10651. INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
  10652. "(expected %i, found %i)\n", \
  10653. current_config->name, \
  10654. pipe_config->name); \
  10655. ret = false; \
  10656. }
  10657. #define PIPE_CONF_QUIRK(quirk) \
  10658. ((current_config->quirks | pipe_config->quirks) & (quirk))
  10659. PIPE_CONF_CHECK_I(cpu_transcoder);
  10660. PIPE_CONF_CHECK_I(has_pch_encoder);
  10661. PIPE_CONF_CHECK_I(fdi_lanes);
  10662. PIPE_CONF_CHECK_M_N(fdi_m_n);
  10663. PIPE_CONF_CHECK_I(has_dp_encoder);
  10664. PIPE_CONF_CHECK_I(lane_count);
  10665. if (INTEL_INFO(dev)->gen < 8) {
  10666. PIPE_CONF_CHECK_M_N(dp_m_n);
  10667. if (current_config->has_drrs)
  10668. PIPE_CONF_CHECK_M_N(dp_m2_n2);
  10669. } else
  10670. PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
  10671. PIPE_CONF_CHECK_I(has_dsi_encoder);
  10672. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
  10673. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
  10674. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
  10675. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
  10676. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
  10677. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
  10678. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
  10679. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
  10680. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
  10681. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
  10682. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
  10683. PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
  10684. PIPE_CONF_CHECK_I(pixel_multiplier);
  10685. PIPE_CONF_CHECK_I(has_hdmi_sink);
  10686. if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
  10687. IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
  10688. PIPE_CONF_CHECK_I(limited_color_range);
  10689. PIPE_CONF_CHECK_I(has_infoframe);
  10690. PIPE_CONF_CHECK_I(has_audio);
  10691. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  10692. DRM_MODE_FLAG_INTERLACE);
  10693. if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
  10694. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  10695. DRM_MODE_FLAG_PHSYNC);
  10696. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  10697. DRM_MODE_FLAG_NHSYNC);
  10698. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  10699. DRM_MODE_FLAG_PVSYNC);
  10700. PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
  10701. DRM_MODE_FLAG_NVSYNC);
  10702. }
  10703. PIPE_CONF_CHECK_X(gmch_pfit.control);
  10704. /* pfit ratios are autocomputed by the hw on gen4+ */
  10705. if (INTEL_INFO(dev)->gen < 4)
  10706. PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
  10707. PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
  10708. if (!adjust) {
  10709. PIPE_CONF_CHECK_I(pipe_src_w);
  10710. PIPE_CONF_CHECK_I(pipe_src_h);
  10711. PIPE_CONF_CHECK_I(pch_pfit.enabled);
  10712. if (current_config->pch_pfit.enabled) {
  10713. PIPE_CONF_CHECK_X(pch_pfit.pos);
  10714. PIPE_CONF_CHECK_X(pch_pfit.size);
  10715. }
  10716. PIPE_CONF_CHECK_I(scaler_state.scaler_id);
  10717. }
10718. /* BDW+ doesn't expose a synchronous way to read the state */
  10719. if (IS_HASWELL(dev))
  10720. PIPE_CONF_CHECK_I(ips_enabled);
  10721. PIPE_CONF_CHECK_I(double_wide);
  10722. PIPE_CONF_CHECK_X(ddi_pll_sel);
  10723. PIPE_CONF_CHECK_P(shared_dpll);
  10724. PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
  10725. PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
  10726. PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
  10727. PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
  10728. PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
  10729. PIPE_CONF_CHECK_X(dpll_hw_state.spll);
  10730. PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
  10731. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
  10732. PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
  10733. PIPE_CONF_CHECK_X(dsi_pll.ctrl);
  10734. PIPE_CONF_CHECK_X(dsi_pll.div);
  10735. if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
  10736. PIPE_CONF_CHECK_I(pipe_bpp);
  10737. PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
  10738. PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
  10739. #undef PIPE_CONF_CHECK_X
  10740. #undef PIPE_CONF_CHECK_I
  10741. #undef PIPE_CONF_CHECK_P
  10742. #undef PIPE_CONF_CHECK_FLAGS
  10743. #undef PIPE_CONF_CHECK_CLOCK_FUZZY
  10744. #undef PIPE_CONF_QUIRK
  10745. #undef INTEL_ERR_OR_DBG_KMS
  10746. return ret;
  10747. }
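/*
 * Editorial note: each PIPE_CONF_CHECK_* use above expands to an inline
 * comparison plus a log message. For instance PIPE_CONF_CHECK_I(pipe_bpp)
 * expands (roughly) to:
 *
 *	if (current_config->pipe_bpp != pipe_config->pipe_bpp) {
 *		INTEL_ERR_OR_DBG_KMS("mismatch in pipe_bpp "
 *				     "(expected %i, found %i)\n",
 *				     current_config->pipe_bpp,
 *				     pipe_config->pipe_bpp);
 *		ret = false;
 *	}
 *
 * so a mismatch is reported but the remaining fields are still checked,
 * and DRM_ERROR vs DRM_DEBUG_KMS is selected by the adjust argument.
 */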
  10748. static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
  10749. const struct intel_crtc_state *pipe_config)
  10750. {
  10751. if (pipe_config->has_pch_encoder) {
  10752. int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  10753. &pipe_config->fdi_m_n);
  10754. int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
  10755. /*
  10756. * FDI already provided one idea for the dotclock.
  10757. * Yell if the encoder disagrees.
  10758. */
  10759. WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
  10760. "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
  10761. fdi_dotclock, dotclock);
  10762. }
  10763. }
  10764. static void verify_wm_state(struct drm_crtc *crtc,
  10765. struct drm_crtc_state *new_state)
  10766. {
  10767. struct drm_device *dev = crtc->dev;
  10768. struct drm_i915_private *dev_priv = dev->dev_private;
  10769. struct skl_ddb_allocation hw_ddb, *sw_ddb;
  10770. struct skl_ddb_entry *hw_entry, *sw_entry;
  10771. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10772. const enum pipe pipe = intel_crtc->pipe;
  10773. int plane;
  10774. if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
  10775. return;
  10776. skl_ddb_get_hw_state(dev_priv, &hw_ddb);
  10777. sw_ddb = &dev_priv->wm.skl_hw.ddb;
  10778. /* planes */
  10779. for_each_plane(dev_priv, pipe, plane) {
  10780. hw_entry = &hw_ddb.plane[pipe][plane];
  10781. sw_entry = &sw_ddb->plane[pipe][plane];
  10782. if (skl_ddb_entry_equal(hw_entry, sw_entry))
  10783. continue;
  10784. DRM_ERROR("mismatch in DDB state pipe %c plane %d "
  10785. "(expected (%u,%u), found (%u,%u))\n",
  10786. pipe_name(pipe), plane + 1,
  10787. sw_entry->start, sw_entry->end,
  10788. hw_entry->start, hw_entry->end);
  10789. }
  10790. /* cursor */
  10791. hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
  10792. sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
  10793. if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
  10794. DRM_ERROR("mismatch in DDB state pipe %c cursor "
  10795. "(expected (%u,%u), found (%u,%u))\n",
  10796. pipe_name(pipe),
  10797. sw_entry->start, sw_entry->end,
  10798. hw_entry->start, hw_entry->end);
  10799. }
  10800. }
  10801. static void
  10802. verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
  10803. {
  10804. struct drm_connector *connector;
  10805. drm_for_each_connector(connector, dev) {
  10806. struct drm_encoder *encoder = connector->encoder;
  10807. struct drm_connector_state *state = connector->state;
  10808. if (state->crtc != crtc)
  10809. continue;
  10810. intel_connector_verify_state(to_intel_connector(connector));
  10811. I915_STATE_WARN(state->best_encoder != encoder,
  10812. "connector's atomic encoder doesn't match legacy encoder\n");
  10813. }
  10814. }
  10815. static void
  10816. verify_encoder_state(struct drm_device *dev)
  10817. {
  10818. struct intel_encoder *encoder;
  10819. struct intel_connector *connector;
  10820. for_each_intel_encoder(dev, encoder) {
  10821. bool enabled = false;
  10822. enum pipe pipe;
  10823. DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
  10824. encoder->base.base.id,
  10825. encoder->base.name);
  10826. for_each_intel_connector(dev, connector) {
  10827. if (connector->base.state->best_encoder != &encoder->base)
  10828. continue;
  10829. enabled = true;
  10830. I915_STATE_WARN(connector->base.state->crtc !=
  10831. encoder->base.crtc,
  10832. "connector's crtc doesn't match encoder crtc\n");
  10833. }
  10834. I915_STATE_WARN(!!encoder->base.crtc != enabled,
  10835. "encoder's enabled state mismatch "
  10836. "(expected %i, found %i)\n",
  10837. !!encoder->base.crtc, enabled);
  10838. if (!encoder->base.crtc) {
  10839. bool active;
  10840. active = encoder->get_hw_state(encoder, &pipe);
  10841. I915_STATE_WARN(active,
  10842. "encoder detached but still enabled on pipe %c.\n",
  10843. pipe_name(pipe));
  10844. }
  10845. }
  10846. }
  10847. static void
  10848. verify_crtc_state(struct drm_crtc *crtc,
  10849. struct drm_crtc_state *old_crtc_state,
  10850. struct drm_crtc_state *new_crtc_state)
  10851. {
  10852. struct drm_device *dev = crtc->dev;
  10853. struct drm_i915_private *dev_priv = dev->dev_private;
  10854. struct intel_encoder *encoder;
  10855. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  10856. struct intel_crtc_state *pipe_config, *sw_config;
  10857. struct drm_atomic_state *old_state;
  10858. bool active;
  10859. old_state = old_crtc_state->state;
  10860. __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
  10861. pipe_config = to_intel_crtc_state(old_crtc_state);
  10862. memset(pipe_config, 0, sizeof(*pipe_config));
  10863. pipe_config->base.crtc = crtc;
  10864. pipe_config->base.state = old_state;
  10865. DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
  10866. active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
  10867. /* hw state is inconsistent with the pipe quirk */
  10868. if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
  10869. (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
  10870. active = new_crtc_state->active;
  10871. I915_STATE_WARN(new_crtc_state->active != active,
  10872. "crtc active state doesn't match with hw state "
  10873. "(expected %i, found %i)\n", new_crtc_state->active, active);
  10874. I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
  10875. "transitional active state does not match atomic hw state "
  10876. "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
  10877. for_each_encoder_on_crtc(dev, crtc, encoder) {
  10878. enum pipe pipe;
  10879. active = encoder->get_hw_state(encoder, &pipe);
  10880. I915_STATE_WARN(active != new_crtc_state->active,
  10881. "[ENCODER:%i] active %i with crtc active %i\n",
  10882. encoder->base.base.id, active, new_crtc_state->active);
  10883. I915_STATE_WARN(active && intel_crtc->pipe != pipe,
  10884. "Encoder connected to wrong pipe %c\n",
  10885. pipe_name(pipe));
  10886. if (active)
  10887. encoder->get_config(encoder, pipe_config);
  10888. }
  10889. if (!new_crtc_state->active)
  10890. return;
  10891. intel_pipe_config_sanity_check(dev_priv, pipe_config);
  10892. sw_config = to_intel_crtc_state(crtc->state);
  10893. if (!intel_pipe_config_compare(dev, sw_config,
  10894. pipe_config, false)) {
  10895. I915_STATE_WARN(1, "pipe state doesn't match!\n");
  10896. intel_dump_pipe_config(intel_crtc, pipe_config,
  10897. "[hw state]");
  10898. intel_dump_pipe_config(intel_crtc, sw_config,
  10899. "[sw state]");
  10900. }
  10901. }
  10902. static void
  10903. verify_single_dpll_state(struct drm_i915_private *dev_priv,
  10904. struct intel_shared_dpll *pll,
  10905. struct drm_crtc *crtc,
  10906. struct drm_crtc_state *new_state)
  10907. {
  10908. struct intel_dpll_hw_state dpll_hw_state;
  10909. unsigned crtc_mask;
  10910. bool active;
  10911. memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
  10912. DRM_DEBUG_KMS("%s\n", pll->name);
  10913. active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
  10914. if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
  10915. I915_STATE_WARN(!pll->on && pll->active_mask,
  10916. "pll in active use but not on in sw tracking\n");
  10917. I915_STATE_WARN(pll->on && !pll->active_mask,
  10918. "pll is on but not used by any active crtc\n");
  10919. I915_STATE_WARN(pll->on != active,
  10920. "pll on state mismatch (expected %i, found %i)\n",
  10921. pll->on, active);
  10922. }
  10923. if (!crtc) {
  10924. I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
  10925. "more active pll users than references: %x vs %x\n",
  10926. pll->active_mask, pll->config.crtc_mask);
  10927. return;
  10928. }
  10929. crtc_mask = 1 << drm_crtc_index(crtc);
  10930. if (new_state->active)
  10931. I915_STATE_WARN(!(pll->active_mask & crtc_mask),
  10932. "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
  10933. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  10934. else
  10935. I915_STATE_WARN(pll->active_mask & crtc_mask,
  10936. "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
  10937. pipe_name(drm_crtc_index(crtc)), pll->active_mask);
  10938. I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
  10939. "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
  10940. crtc_mask, pll->config.crtc_mask);
  10941. I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
  10942. &dpll_hw_state,
  10943. sizeof(dpll_hw_state)),
  10944. "pll hw state mismatch\n");
  10945. }
  10946. static void
  10947. verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
  10948. struct drm_crtc_state *old_crtc_state,
  10949. struct drm_crtc_state *new_crtc_state)
  10950. {
  10951. struct drm_i915_private *dev_priv = dev->dev_private;
  10952. struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
  10953. struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
  10954. if (new_state->shared_dpll)
  10955. verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
  10956. if (old_state->shared_dpll &&
  10957. old_state->shared_dpll != new_state->shared_dpll) {
  10958. unsigned crtc_mask = 1 << drm_crtc_index(crtc);
  10959. struct intel_shared_dpll *pll = old_state->shared_dpll;
  10960. I915_STATE_WARN(pll->active_mask & crtc_mask,
  10961. "pll active mismatch (didn't expect pipe %c in active mask)\n",
  10962. pipe_name(drm_crtc_index(crtc)));
  10963. I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
  10964. "pll enabled crtcs mismatch (found %x in enabled mask)\n",
  10965. pipe_name(drm_crtc_index(crtc)));
  10966. }
  10967. }
  10968. static void
  10969. intel_modeset_verify_crtc(struct drm_crtc *crtc,
  10970. struct drm_crtc_state *old_state,
  10971. struct drm_crtc_state *new_state)
  10972. {
  10973. if (!needs_modeset(new_state) &&
  10974. !to_intel_crtc_state(new_state)->update_pipe)
  10975. return;
  10976. verify_wm_state(crtc, new_state);
  10977. verify_connector_state(crtc->dev, crtc);
  10978. verify_crtc_state(crtc, old_state, new_state);
  10979. verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
  10980. }
  10981. static void
  10982. verify_disabled_dpll_state(struct drm_device *dev)
  10983. {
  10984. struct drm_i915_private *dev_priv = dev->dev_private;
  10985. int i;
  10986. for (i = 0; i < dev_priv->num_shared_dpll; i++)
  10987. verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
  10988. }
  10989. static void
  10990. intel_modeset_verify_disabled(struct drm_device *dev)
  10991. {
  10992. verify_encoder_state(dev);
  10993. verify_connector_state(dev, NULL);
  10994. verify_disabled_dpll_state(dev);
  10995. }
  10996. static void update_scanline_offset(struct intel_crtc *crtc)
  10997. {
  10998. struct drm_device *dev = crtc->base.dev;
  10999. /*
  11000. * The scanline counter increments at the leading edge of hsync.
  11001. *
  11002. * On most platforms it starts counting from vtotal-1 on the
  11003. * first active line. That means the scanline counter value is
11004. * always one less than what we would expect. I.e. just after
  11005. * start of vblank, which also occurs at start of hsync (on the
  11006. * last active line), the scanline counter will read vblank_start-1.
  11007. *
  11008. * On gen2 the scanline counter starts counting from 1 instead
  11009. * of vtotal-1, so we have to subtract one (or rather add vtotal-1
  11010. * to keep the value positive), instead of adding one.
  11011. *
  11012. * On HSW+ the behaviour of the scanline counter depends on the output
  11013. * type. For DP ports it behaves like most other platforms, but on HDMI
  11014. * there's an extra 1 line difference. So we need to add two instead of
  11015. * one to the value.
  11016. */
  11017. if (IS_GEN2(dev)) {
  11018. const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
  11019. int vtotal;
  11020. vtotal = adjusted_mode->crtc_vtotal;
  11021. if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
  11022. vtotal /= 2;
  11023. crtc->scanline_offset = vtotal - 1;
  11024. } else if (HAS_DDI(dev) &&
  11025. intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
  11026. crtc->scanline_offset = 2;
  11027. } else
  11028. crtc->scanline_offset = 1;
  11029. }
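/*
 * Illustrative sketch (editorial, not part of this function): the offset
 * computed here is intended to be added to the raw hardware scanline and
 * wrapped at vtotal when converting to the "logical" scanline, roughly:
 *
 *	scanline = (hw_scanline + crtc->scanline_offset) % vtotal;
 *
 * e.g. on gen2 with a non-interlaced mode of vtotal = 806 the offset is
 * 805, on HSW+ HDMI outputs it is 2, and everywhere else it is 1.
 */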
  11030. static void intel_modeset_clear_plls(struct drm_atomic_state *state)
  11031. {
  11032. struct drm_device *dev = state->dev;
  11033. struct drm_i915_private *dev_priv = to_i915(dev);
  11034. struct intel_shared_dpll_config *shared_dpll = NULL;
  11035. struct drm_crtc *crtc;
  11036. struct drm_crtc_state *crtc_state;
  11037. int i;
  11038. if (!dev_priv->display.crtc_compute_clock)
  11039. return;
  11040. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11041. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11042. struct intel_shared_dpll *old_dpll =
  11043. to_intel_crtc_state(crtc->state)->shared_dpll;
  11044. if (!needs_modeset(crtc_state))
  11045. continue;
  11046. to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
  11047. if (!old_dpll)
  11048. continue;
  11049. if (!shared_dpll)
  11050. shared_dpll = intel_atomic_get_shared_dpll_state(state);
  11051. intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
  11052. }
  11053. }
  11054. /*
  11055. * This implements the workaround described in the "notes" section of the mode
  11056. * set sequence documentation. When going from no pipes or single pipe to
  11057. * multiple pipes, and planes are enabled after the pipe, we need to wait at
  11058. * least 2 vblanks on the first pipe before enabling planes on the second pipe.
  11059. */
  11060. static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
  11061. {
  11062. struct drm_crtc_state *crtc_state;
  11063. struct intel_crtc *intel_crtc;
  11064. struct drm_crtc *crtc;
  11065. struct intel_crtc_state *first_crtc_state = NULL;
  11066. struct intel_crtc_state *other_crtc_state = NULL;
  11067. enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
  11068. int i;
  11069. /* look at all crtc's that are going to be enabled during the modeset */
  11070. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11071. intel_crtc = to_intel_crtc(crtc);
  11072. if (!crtc_state->active || !needs_modeset(crtc_state))
  11073. continue;
  11074. if (first_crtc_state) {
  11075. other_crtc_state = to_intel_crtc_state(crtc_state);
  11076. break;
  11077. } else {
  11078. first_crtc_state = to_intel_crtc_state(crtc_state);
  11079. first_pipe = intel_crtc->pipe;
  11080. }
  11081. }
  11082. /* No workaround needed? */
  11083. if (!first_crtc_state)
  11084. return 0;
  11085. /* w/a possibly needed, check how many crtc's are already enabled. */
  11086. for_each_intel_crtc(state->dev, intel_crtc) {
  11087. struct intel_crtc_state *pipe_config;
  11088. pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
  11089. if (IS_ERR(pipe_config))
  11090. return PTR_ERR(pipe_config);
  11091. pipe_config->hsw_workaround_pipe = INVALID_PIPE;
  11092. if (!pipe_config->base.active ||
  11093. needs_modeset(&pipe_config->base))
  11094. continue;
  11095. /* 2 or more enabled crtcs means no need for w/a */
  11096. if (enabled_pipe != INVALID_PIPE)
  11097. return 0;
  11098. enabled_pipe = intel_crtc->pipe;
  11099. }
  11100. if (enabled_pipe != INVALID_PIPE)
  11101. first_crtc_state->hsw_workaround_pipe = enabled_pipe;
  11102. else if (other_crtc_state)
  11103. other_crtc_state->hsw_workaround_pipe = first_pipe;
  11104. return 0;
  11105. }
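/*
 * Pull every active pipe (plus its connectors and planes) into the atomic
 * state and force a full modeset on it; used when a global resource such as
 * cdclk has to change underneath all pipes.
 */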
  11106. static int intel_modeset_all_pipes(struct drm_atomic_state *state)
  11107. {
  11108. struct drm_crtc *crtc;
  11109. struct drm_crtc_state *crtc_state;
  11110. int ret = 0;
  11111. /* add all active pipes to the state */
  11112. for_each_crtc(state->dev, crtc) {
  11113. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  11114. if (IS_ERR(crtc_state))
  11115. return PTR_ERR(crtc_state);
  11116. if (!crtc_state->active || needs_modeset(crtc_state))
  11117. continue;
  11118. crtc_state->mode_changed = true;
  11119. ret = drm_atomic_add_affected_connectors(state, crtc);
  11120. if (ret)
  11121. break;
  11122. ret = drm_atomic_add_affected_planes(state, crtc);
  11123. if (ret)
  11124. break;
  11125. }
  11126. return ret;
  11127. }
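/*
 * Global checks that only matter when at least one CRTC needs a full
 * modeset: digital port conflicts, the active CRTC mask, cdclk recomputation
 * and the Haswell mode-set planes workaround.
 */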
  11128. static int intel_modeset_checks(struct drm_atomic_state *state)
  11129. {
  11130. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11131. struct drm_i915_private *dev_priv = state->dev->dev_private;
  11132. struct drm_crtc *crtc;
  11133. struct drm_crtc_state *crtc_state;
  11134. int ret = 0, i;
  11135. if (!check_digital_port_conflicts(state)) {
  11136. DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
  11137. return -EINVAL;
  11138. }
  11139. intel_state->modeset = true;
  11140. intel_state->active_crtcs = dev_priv->active_crtcs;
  11141. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11142. if (crtc_state->active)
  11143. intel_state->active_crtcs |= 1 << i;
  11144. else
  11145. intel_state->active_crtcs &= ~(1 << i);
  11146. }
  11147. /*
  11148. * See if the config requires any additional preparation, e.g.
  11149. * to adjust global state with pipes off. We need to do this
  11150. * here so we can get the modeset_pipe updated config for the new
  11151. * mode set on this crtc. For other crtcs we need to use the
  11152. * adjusted_mode bits in the crtc directly.
  11153. */
  11154. if (dev_priv->display.modeset_calc_cdclk) {
  11155. ret = dev_priv->display.modeset_calc_cdclk(state);
  11156. if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
  11157. ret = intel_modeset_all_pipes(state);
  11158. if (ret < 0)
  11159. return ret;
  11160. DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
  11161. intel_state->cdclk, intel_state->dev_cdclk);
  11162. } else
  11163. to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
  11164. intel_modeset_clear_plls(state);
  11165. if (IS_HASWELL(dev_priv))
  11166. return haswell_mode_set_planes_workaround(state);
  11167. return 0;
  11168. }
  11169. /*
  11170. * Handle calculation of various watermark data at the end of the atomic check
  11171. * phase. The code here should be run after the per-crtc and per-plane 'check'
  11172. * handlers to ensure that all derived state has been updated.
  11173. */
  11174. static void calc_watermark_data(struct drm_atomic_state *state)
  11175. {
  11176. struct drm_device *dev = state->dev;
  11177. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11178. struct drm_crtc *crtc;
  11179. struct drm_crtc_state *cstate;
  11180. struct drm_plane *plane;
  11181. struct drm_plane_state *pstate;
  11182. /*
  11183. * Calculate watermark configuration details now that derived
  11184. * plane/crtc state is all properly updated.
  11185. */
  11186. drm_for_each_crtc(crtc, dev) {
  11187. cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
  11188. crtc->state;
  11189. if (cstate->active)
  11190. intel_state->wm_config.num_pipes_active++;
  11191. }
  11192. drm_for_each_legacy_plane(plane, dev) {
  11193. pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
  11194. plane->state;
  11195. if (!to_intel_plane_state(pstate)->visible)
  11196. continue;
  11197. intel_state->wm_config.sprites_enabled = true;
  11198. if (pstate->crtc_w != pstate->src_w >> 16 ||
  11199. pstate->crtc_h != pstate->src_h >> 16)
  11200. intel_state->wm_config.sprites_scaled = true;
  11201. }
  11202. }
  11203. /**
  11204. * intel_atomic_check - validate state object
  11205. * @dev: drm device
  11206. * @state: state to validate
  11207. */
  11208. static int intel_atomic_check(struct drm_device *dev,
  11209. struct drm_atomic_state *state)
  11210. {
  11211. struct drm_i915_private *dev_priv = to_i915(dev);
  11212. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11213. struct drm_crtc *crtc;
  11214. struct drm_crtc_state *crtc_state;
  11215. int ret, i;
  11216. bool any_ms = false;
  11217. ret = drm_atomic_helper_check_modeset(dev, state);
  11218. if (ret)
  11219. return ret;
  11220. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11221. struct intel_crtc_state *pipe_config =
  11222. to_intel_crtc_state(crtc_state);
  11223. /* Catch I915_MODE_FLAG_INHERITED */
  11224. if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
  11225. crtc_state->mode_changed = true;
  11226. if (!crtc_state->enable) {
  11227. if (needs_modeset(crtc_state))
  11228. any_ms = true;
  11229. continue;
  11230. }
  11231. if (!needs_modeset(crtc_state))
  11232. continue;
  11233. /* FIXME: For only active_changed we shouldn't need to do any
  11234. * state recomputation at all. */
  11235. ret = drm_atomic_add_affected_connectors(state, crtc);
  11236. if (ret)
  11237. return ret;
  11238. ret = intel_modeset_pipe_config(crtc, pipe_config);
  11239. if (ret)
  11240. return ret;
  11241. if (i915.fastboot &&
  11242. intel_pipe_config_compare(dev,
  11243. to_intel_crtc_state(crtc->state),
  11244. pipe_config, true)) {
  11245. crtc_state->mode_changed = false;
  11246. to_intel_crtc_state(crtc_state)->update_pipe = true;
  11247. }
  11248. if (needs_modeset(crtc_state)) {
  11249. any_ms = true;
  11250. ret = drm_atomic_add_affected_planes(state, crtc);
  11251. if (ret)
  11252. return ret;
  11253. }
  11254. intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
  11255. needs_modeset(crtc_state) ?
  11256. "[modeset]" : "[fastset]");
  11257. }
  11258. if (any_ms) {
  11259. ret = intel_modeset_checks(state);
  11260. if (ret)
  11261. return ret;
  11262. } else
  11263. intel_state->cdclk = dev_priv->cdclk_freq;
  11264. ret = drm_atomic_helper_check_planes(dev, state);
  11265. if (ret)
  11266. return ret;
  11267. intel_fbc_choose_crtc(dev_priv, state);
  11268. calc_watermark_data(state);
  11269. return 0;
  11270. }
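/*
 * Pre-commit preparation: reject nonblocking commits, wait for pending page
 * flips, pin the new framebuffers via the plane helpers and wait for any
 * outstanding rendering to them.
 */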
  11271. static int intel_atomic_prepare_commit(struct drm_device *dev,
  11272. struct drm_atomic_state *state,
  11273. bool nonblock)
  11274. {
  11275. struct drm_i915_private *dev_priv = dev->dev_private;
  11276. struct drm_plane_state *plane_state;
  11277. struct drm_crtc_state *crtc_state;
  11278. struct drm_plane *plane;
  11279. struct drm_crtc *crtc;
  11280. int i, ret;
  11281. if (nonblock) {
  11282. DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
  11283. return -EINVAL;
  11284. }
  11285. for_each_crtc_in_state(state, crtc, crtc_state, i) {
  11286. if (state->legacy_cursor_update)
  11287. continue;
  11288. ret = intel_crtc_wait_for_pending_flips(crtc);
  11289. if (ret)
  11290. return ret;
  11291. if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
  11292. flush_workqueue(dev_priv->wq);
  11293. }
  11294. ret = mutex_lock_interruptible(&dev->struct_mutex);
  11295. if (ret)
  11296. return ret;
  11297. ret = drm_atomic_helper_prepare_planes(dev, state);
  11298. mutex_unlock(&dev->struct_mutex);
  11299. if (!ret && !nonblock) {
  11300. for_each_plane_in_state(state, plane, plane_state, i) {
  11301. struct intel_plane_state *intel_plane_state =
  11302. to_intel_plane_state(plane_state);
  11303. if (!intel_plane_state->wait_req)
  11304. continue;
  11305. ret = __i915_wait_request(intel_plane_state->wait_req,
  11306. true, NULL, NULL);
  11307. if (ret) {
  11308. /* Any hang should be swallowed by the wait */
  11309. WARN_ON(ret == -EIO);
  11310. mutex_lock(&dev->struct_mutex);
  11311. drm_atomic_helper_cleanup_planes(dev, state);
  11312. mutex_unlock(&dev->struct_mutex);
  11313. break;
  11314. }
  11315. }
  11316. }
  11317. return ret;
  11318. }
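/*
 * Wait for a vblank to pass on every pipe in @crtc_mask, warning if any pipe
 * takes longer than 50 ms.
 */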
  11319. static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
  11320. struct drm_i915_private *dev_priv,
  11321. unsigned crtc_mask)
  11322. {
  11323. unsigned last_vblank_count[I915_MAX_PIPES];
  11324. enum pipe pipe;
  11325. int ret;
  11326. if (!crtc_mask)
  11327. return;
  11328. for_each_pipe(dev_priv, pipe) {
  11329. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  11330. if (!((1 << pipe) & crtc_mask))
  11331. continue;
  11332. ret = drm_crtc_vblank_get(crtc);
  11333. if (WARN_ON(ret != 0)) {
  11334. crtc_mask &= ~(1 << pipe);
  11335. continue;
  11336. }
  11337. last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
  11338. }
  11339. for_each_pipe(dev_priv, pipe) {
  11340. struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
  11341. long lret;
  11342. if (!((1 << pipe) & crtc_mask))
  11343. continue;
  11344. lret = wait_event_timeout(dev->vblank[pipe].queue,
  11345. last_vblank_count[pipe] !=
  11346. drm_crtc_vblank_count(crtc),
  11347. msecs_to_jiffies(50));
  11348. WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
  11349. drm_crtc_vblank_put(crtc);
  11350. }
  11351. }
  11352. static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
  11353. {
  11354. /* fb updated, need to unpin old fb */
  11355. if (crtc_state->fb_changed)
  11356. return true;
  11357. /* wm changes, need vblank before final wm's */
  11358. if (crtc_state->update_wm_post)
  11359. return true;
  11360. /*
  11361. * cxsr is re-enabled after vblank.
  11362. * This is already handled by crtc_state->update_wm_post,
  11363. * but added for clarity.
  11364. */
  11365. if (crtc_state->disable_cxsr)
  11366. return true;
  11367. return false;
  11368. }
  11369. /**
  11370. * intel_atomic_commit - commit validated state object
  11371. * @dev: DRM device
  11372. * @state: the top-level driver state object
  11373. * @nonblock: nonblocking commit
  11374. *
  11375. * This function commits a top-level state object that has been validated
  11376. * with drm_atomic_helper_check().
  11377. *
  11378. * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
  11379. * we can only handle plane-related operations and do not yet support
  11380. * nonblocking commit.
  11381. *
  11382. * RETURNS
  11383. * Zero for success or -errno.
  11384. */
  11385. static int intel_atomic_commit(struct drm_device *dev,
  11386. struct drm_atomic_state *state,
  11387. bool nonblock)
  11388. {
  11389. struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
  11390. struct drm_i915_private *dev_priv = dev->dev_private;
  11391. struct drm_crtc_state *old_crtc_state;
  11392. struct drm_crtc *crtc;
  11393. struct intel_crtc_state *intel_cstate;
  11394. int ret = 0, i;
  11395. bool hw_check = intel_state->modeset;
  11396. unsigned long put_domains[I915_MAX_PIPES] = {};
  11397. unsigned crtc_vblank_mask = 0;
  11398. ret = intel_atomic_prepare_commit(dev, state, nonblock);
  11399. if (ret) {
  11400. DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
  11401. return ret;
  11402. }
  11403. drm_atomic_helper_swap_state(dev, state);
  11404. dev_priv->wm.config = intel_state->wm_config;
  11405. intel_shared_dpll_commit(state);
  11406. if (intel_state->modeset) {
  11407. memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
  11408. sizeof(intel_state->min_pixclk));
  11409. dev_priv->active_crtcs = intel_state->active_crtcs;
  11410. dev_priv->atomic_cdclk_freq = intel_state->cdclk;
  11411. intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
  11412. }
  11413. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11414. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11415. if (needs_modeset(crtc->state) ||
  11416. to_intel_crtc_state(crtc->state)->update_pipe) {
  11417. hw_check = true;
  11418. put_domains[to_intel_crtc(crtc)->pipe] =
  11419. modeset_get_crtc_power_domains(crtc,
  11420. to_intel_crtc_state(crtc->state));
  11421. }
  11422. if (!needs_modeset(crtc->state))
  11423. continue;
  11424. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  11425. if (old_crtc_state->active) {
  11426. intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
  11427. dev_priv->display.crtc_disable(crtc);
  11428. intel_crtc->active = false;
  11429. intel_fbc_disable(intel_crtc);
  11430. intel_disable_shared_dpll(intel_crtc);
  11431. /*
  11432. * Underruns don't always raise
  11433. * interrupts, so check manually.
  11434. */
  11435. intel_check_cpu_fifo_underruns(dev_priv);
  11436. intel_check_pch_fifo_underruns(dev_priv);
  11437. if (!crtc->state->active)
  11438. intel_update_watermarks(crtc);
  11439. }
  11440. }
  11441. /* Only after disabling all output pipelines that will be changed can we
  11442. * update the output configuration. */
  11443. intel_modeset_update_crtc_state(state);
  11444. if (intel_state->modeset) {
  11445. drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
  11446. if (dev_priv->display.modeset_commit_cdclk &&
  11447. intel_state->dev_cdclk != dev_priv->cdclk_freq)
  11448. dev_priv->display.modeset_commit_cdclk(state);
  11449. intel_modeset_verify_disabled(dev);
  11450. }
  11451. /* Now enable the clocks, plane, pipe, and connectors that we set up. */
  11452. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11453. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11454. bool modeset = needs_modeset(crtc->state);
  11455. struct intel_crtc_state *pipe_config =
  11456. to_intel_crtc_state(crtc->state);
  11457. bool update_pipe = !modeset && pipe_config->update_pipe;
  11458. if (modeset && crtc->state->active) {
  11459. update_scanline_offset(to_intel_crtc(crtc));
  11460. dev_priv->display.crtc_enable(crtc);
  11461. }
  11462. if (!modeset)
  11463. intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
  11464. if (crtc->state->active &&
  11465. drm_atomic_get_existing_plane_state(state, crtc->primary))
  11466. intel_fbc_enable(intel_crtc);
  11467. if (crtc->state->active &&
  11468. (crtc->state->planes_changed || update_pipe))
  11469. drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
  11470. if (pipe_config->base.active && needs_vblank_wait(pipe_config))
  11471. crtc_vblank_mask |= 1 << i;
  11472. }
  11473. /* FIXME: add subpixel order */
  11474. if (!state->legacy_cursor_update)
  11475. intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
  11476. /*
  11477. * Now that the vblank has passed, we can go ahead and program the
  11478. * optimal watermarks on platforms that need two-step watermark
  11479. * programming.
  11480. *
  11481. * TODO: Move this (and other cleanup) to an async worker eventually.
  11482. */
  11483. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11484. intel_cstate = to_intel_crtc_state(crtc->state);
  11485. if (dev_priv->display.optimize_watermarks)
  11486. dev_priv->display.optimize_watermarks(intel_cstate);
  11487. }
  11488. for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
  11489. intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
  11490. if (put_domains[i])
  11491. modeset_put_power_domains(dev_priv, put_domains[i]);
  11492. intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
  11493. }
  11494. if (intel_state->modeset)
  11495. intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
  11496. mutex_lock(&dev->struct_mutex);
  11497. drm_atomic_helper_cleanup_planes(dev, state);
  11498. mutex_unlock(&dev->struct_mutex);
  11499. drm_atomic_state_free(state);
  11500. /* As one of the primary mmio accessors, KMS has a high likelihood
  11501. * of triggering bugs in unclaimed access. After we finish
  11502. * modesetting, see if an error has been flagged, and if so
  11503. * enable debugging for the next modeset - and hope we catch
  11504. * the culprit.
  11505. *
  11506. * XXX note that we assume display power is on at this point.
  11507. * This might hold true now but we need to add pm helper to check
  11508. * unclaimed only when the hardware is on, as atomic commits
  11509. * can happen also when the device is completely off.
  11510. */
  11511. intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
  11512. return 0;
  11513. }
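/*
 * Re-commit the CRTC's current state with mode_changed forced so the mode is
 * fully reprogrammed, backing off and retrying on modeset-lock deadlock.
 */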
  11514. void intel_crtc_restore_mode(struct drm_crtc *crtc)
  11515. {
  11516. struct drm_device *dev = crtc->dev;
  11517. struct drm_atomic_state *state;
  11518. struct drm_crtc_state *crtc_state;
  11519. int ret;
  11520. state = drm_atomic_state_alloc(dev);
  11521. if (!state) {
  11522. DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
  11523. crtc->base.id);
  11524. return;
  11525. }
  11526. state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
  11527. retry:
  11528. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  11529. ret = PTR_ERR_OR_ZERO(crtc_state);
  11530. if (!ret) {
  11531. if (!crtc_state->active)
  11532. goto out;
  11533. crtc_state->mode_changed = true;
  11534. ret = drm_atomic_commit(state);
  11535. }
  11536. if (ret == -EDEADLK) {
  11537. drm_atomic_state_clear(state);
  11538. drm_modeset_backoff(state->acquire_ctx);
  11539. goto retry;
  11540. }
  11541. if (ret)
  11542. out:
  11543. drm_atomic_state_free(state);
  11544. }
  11545. #undef for_each_intel_crtc_masked
  11546. static const struct drm_crtc_funcs intel_crtc_funcs = {
  11547. .gamma_set = drm_atomic_helper_legacy_gamma_set,
  11548. .set_config = drm_atomic_helper_set_config,
  11549. .set_property = drm_atomic_helper_crtc_set_property,
  11550. .destroy = intel_crtc_destroy,
  11551. .page_flip = intel_crtc_page_flip,
  11552. .atomic_duplicate_state = intel_crtc_duplicate_state,
  11553. .atomic_destroy_state = intel_crtc_destroy_state,
  11554. };
  11555. /**
  11556. * intel_prepare_plane_fb - Prepare fb for usage on plane
  11557. * @plane: drm plane to prepare for
  11558. * @fb: framebuffer to prepare for presentation
  11559. *
  11560. * Prepares a framebuffer for usage on a display plane. Generally this
  11561. * involves pinning the underlying object and updating the frontbuffer tracking
  11562. * bits. Some older platforms need special physical address handling for
  11563. * cursor planes.
  11564. *
  11565. * Must be called with struct_mutex held.
  11566. *
  11567. * Returns 0 on success, negative error code on failure.
  11568. */
  11569. int
  11570. intel_prepare_plane_fb(struct drm_plane *plane,
  11571. const struct drm_plane_state *new_state)
  11572. {
  11573. struct drm_device *dev = plane->dev;
  11574. struct drm_framebuffer *fb = new_state->fb;
  11575. struct intel_plane *intel_plane = to_intel_plane(plane);
  11576. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  11577. struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
  11578. int ret = 0;
  11579. if (!obj && !old_obj)
  11580. return 0;
  11581. if (old_obj) {
  11582. struct drm_crtc_state *crtc_state =
  11583. drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
  11584. /* Big Hammer, we also need to ensure that any pending
  11585. * MI_WAIT_FOR_EVENT inside a user batch buffer on the
  11586. * current scanout is retired before unpinning the old
  11587. * framebuffer. Note that we rely on userspace rendering
  11588. * into the buffer attached to the pipe they are waiting
  11589. * on. If not, userspace generates a GPU hang with IPEHR
  11590. * pointing to the MI_WAIT_FOR_EVENT.
  11591. *
  11592. * This should only fail upon a hung GPU, in which case we
  11593. * can safely continue.
  11594. */
  11595. if (needs_modeset(crtc_state))
  11596. ret = i915_gem_object_wait_rendering(old_obj, true);
  11597. if (ret) {
  11598. /* GPU hangs should have been swallowed by the wait */
  11599. WARN_ON(ret == -EIO);
  11600. return ret;
  11601. }
  11602. }
  11603. /* For framebuffer backed by dmabuf, wait for fence */
  11604. if (obj && obj->base.dma_buf) {
  11605. long lret;
  11606. lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
  11607. false, true,
  11608. MAX_SCHEDULE_TIMEOUT);
  11609. if (lret == -ERESTARTSYS)
  11610. return lret;
  11611. WARN(lret < 0, "waiting returns %li\n", lret);
  11612. }
  11613. if (!obj) {
  11614. ret = 0;
  11615. } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
  11616. INTEL_INFO(dev)->cursor_needs_physical) {
  11617. int align = IS_I830(dev) ? 16 * 1024 : 256;
  11618. ret = i915_gem_object_attach_phys(obj, align);
  11619. if (ret)
  11620. DRM_DEBUG_KMS("failed to attach phys object\n");
  11621. } else {
  11622. ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
  11623. }
  11624. if (ret == 0) {
  11625. if (obj) {
  11626. struct intel_plane_state *plane_state =
  11627. to_intel_plane_state(new_state);
  11628. i915_gem_request_assign(&plane_state->wait_req,
  11629. obj->last_write_req);
  11630. }
  11631. i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
  11632. }
  11633. return ret;
  11634. }
  11635. /**
  11636. * intel_cleanup_plane_fb - Cleans up an fb after plane use
  11637. * @plane: drm plane to clean up for
  11638. * @fb: old framebuffer that was on plane
  11639. *
  11640. * Cleans up a framebuffer that has just been removed from a plane.
  11641. *
  11642. * Must be called with struct_mutex held.
  11643. */
  11644. void
  11645. intel_cleanup_plane_fb(struct drm_plane *plane,
  11646. const struct drm_plane_state *old_state)
  11647. {
  11648. struct drm_device *dev = plane->dev;
  11649. struct intel_plane *intel_plane = to_intel_plane(plane);
  11650. struct intel_plane_state *old_intel_state;
  11651. struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
  11652. struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
  11653. old_intel_state = to_intel_plane_state(old_state);
  11654. if (!obj && !old_obj)
  11655. return;
  11656. if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
  11657. !INTEL_INFO(dev)->cursor_needs_physical))
  11658. intel_unpin_fb_obj(old_state->fb, old_state->rotation);
  11659. /* prepare_fb aborted? */
  11660. if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
  11661. (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
  11662. i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
  11663. i915_gem_request_assign(&old_intel_state->wait_req, NULL);
  11664. }
  11665. int
  11666. skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
  11667. {
  11668. int max_scale;
  11669. struct drm_device *dev;
  11670. struct drm_i915_private *dev_priv;
  11671. int crtc_clock, cdclk;
  11672. if (!intel_crtc || !crtc_state->base.enable)
  11673. return DRM_PLANE_HELPER_NO_SCALING;
  11674. dev = intel_crtc->base.dev;
  11675. dev_priv = dev->dev_private;
  11676. crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
  11677. cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
  11678. if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
  11679. return DRM_PLANE_HELPER_NO_SCALING;
  11680. /*
  11681. * skl max scale is lower of:
  11682. * just under 3 (the -1 keeps it below 3.0)
  11683. * or
  11684. * cdclk/crtc_clock
  11685. */
  11686. max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
  11687. return max_scale;
  11688. }
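/*
 * Atomic check for the primary plane: gen9+ may use a pipe scaler (when no
 * colour key is set) and position the plane freely; older platforms allow
 * neither scaling nor partial positioning.
 */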
  11689. static int
  11690. intel_check_primary_plane(struct drm_plane *plane,
  11691. struct intel_crtc_state *crtc_state,
  11692. struct intel_plane_state *state)
  11693. {
  11694. struct drm_crtc *crtc = state->base.crtc;
  11695. struct drm_framebuffer *fb = state->base.fb;
  11696. int min_scale = DRM_PLANE_HELPER_NO_SCALING;
  11697. int max_scale = DRM_PLANE_HELPER_NO_SCALING;
  11698. bool can_position = false;
  11699. if (INTEL_INFO(plane->dev)->gen >= 9) {
  11700. /* use scaler when colorkey is not required */
  11701. if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
  11702. min_scale = 1;
  11703. max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
  11704. }
  11705. can_position = true;
  11706. }
  11707. return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
  11708. &state->dst, &state->clip,
  11709. min_scale, max_scale,
  11710. can_position, true,
  11711. &state->visible);
  11712. }
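/*
 * Start of a plane commit on @crtc: begin vblank evasion; for non-modeset
 * updates also reload colour management state and, on fastsets, the pipe
 * configuration.
 */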
  11713. static void intel_begin_crtc_commit(struct drm_crtc *crtc,
  11714. struct drm_crtc_state *old_crtc_state)
  11715. {
  11716. struct drm_device *dev = crtc->dev;
  11717. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11718. struct intel_crtc_state *old_intel_state =
  11719. to_intel_crtc_state(old_crtc_state);
  11720. bool modeset = needs_modeset(crtc->state);
  11721. /* Perform vblank evasion around commit operation */
  11722. intel_pipe_update_start(intel_crtc);
  11723. if (modeset)
  11724. return;
  11725. if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
  11726. intel_color_set_csc(crtc->state);
  11727. intel_color_load_luts(crtc->state);
  11728. }
  11729. if (to_intel_crtc_state(crtc->state)->update_pipe)
  11730. intel_update_pipe_config(intel_crtc, old_intel_state);
  11731. else if (INTEL_INFO(dev)->gen >= 9)
  11732. skl_detach_scalers(intel_crtc);
  11733. }
  11734. static void intel_finish_crtc_commit(struct drm_crtc *crtc,
  11735. struct drm_crtc_state *old_crtc_state)
  11736. {
  11737. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11738. intel_pipe_update_end(intel_crtc);
  11739. }
  11740. /**
  11741. * intel_plane_destroy - destroy a plane
  11742. * @plane: plane to destroy
  11743. *
  11744. * Common destruction function for all types of planes (primary, cursor,
  11745. * sprite).
  11746. */
  11747. void intel_plane_destroy(struct drm_plane *plane)
  11748. {
  11749. struct intel_plane *intel_plane = to_intel_plane(plane);
  11750. drm_plane_cleanup(plane);
  11751. kfree(intel_plane);
  11752. }
  11753. const struct drm_plane_funcs intel_plane_funcs = {
  11754. .update_plane = drm_atomic_helper_update_plane,
  11755. .disable_plane = drm_atomic_helper_disable_plane,
  11756. .destroy = intel_plane_destroy,
  11757. .set_property = drm_atomic_helper_plane_set_property,
  11758. .atomic_get_property = intel_plane_atomic_get_property,
  11759. .atomic_set_property = intel_plane_atomic_set_property,
  11760. .atomic_duplicate_state = intel_plane_duplicate_state,
  11761. .atomic_destroy_state = intel_plane_destroy_state,
  11762. };
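/*
 * Create and initialise the primary plane for @pipe, selecting the supported
 * format list and the update/disable hooks for this hardware generation.
 */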
  11763. static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
  11764. int pipe)
  11765. {
  11766. struct intel_plane *primary = NULL;
  11767. struct intel_plane_state *state = NULL;
  11768. const uint32_t *intel_primary_formats;
  11769. unsigned int num_formats;
  11770. int ret;
  11771. primary = kzalloc(sizeof(*primary), GFP_KERNEL);
  11772. if (!primary)
  11773. goto fail;
  11774. state = intel_create_plane_state(&primary->base);
  11775. if (!state)
  11776. goto fail;
  11777. primary->base.state = &state->base;
  11778. primary->can_scale = false;
  11779. primary->max_downscale = 1;
  11780. if (INTEL_INFO(dev)->gen >= 9) {
  11781. primary->can_scale = true;
  11782. state->scaler_id = -1;
  11783. }
  11784. primary->pipe = pipe;
  11785. primary->plane = pipe;
  11786. primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
  11787. primary->check_plane = intel_check_primary_plane;
  11788. if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
  11789. primary->plane = !pipe;
  11790. if (INTEL_INFO(dev)->gen >= 9) {
  11791. intel_primary_formats = skl_primary_formats;
  11792. num_formats = ARRAY_SIZE(skl_primary_formats);
  11793. primary->update_plane = skylake_update_primary_plane;
  11794. primary->disable_plane = skylake_disable_primary_plane;
  11795. } else if (HAS_PCH_SPLIT(dev)) {
  11796. intel_primary_formats = i965_primary_formats;
  11797. num_formats = ARRAY_SIZE(i965_primary_formats);
  11798. primary->update_plane = ironlake_update_primary_plane;
  11799. primary->disable_plane = i9xx_disable_primary_plane;
  11800. } else if (INTEL_INFO(dev)->gen >= 4) {
  11801. intel_primary_formats = i965_primary_formats;
  11802. num_formats = ARRAY_SIZE(i965_primary_formats);
  11803. primary->update_plane = i9xx_update_primary_plane;
  11804. primary->disable_plane = i9xx_disable_primary_plane;
  11805. } else {
  11806. intel_primary_formats = i8xx_primary_formats;
  11807. num_formats = ARRAY_SIZE(i8xx_primary_formats);
  11808. primary->update_plane = i9xx_update_primary_plane;
  11809. primary->disable_plane = i9xx_disable_primary_plane;
  11810. }
  11811. ret = drm_universal_plane_init(dev, &primary->base, 0,
  11812. &intel_plane_funcs,
  11813. intel_primary_formats, num_formats,
  11814. DRM_PLANE_TYPE_PRIMARY, NULL);
  11815. if (ret)
  11816. goto fail;
  11817. if (INTEL_INFO(dev)->gen >= 4)
  11818. intel_create_rotation_property(dev, primary);
  11819. drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
  11820. return &primary->base;
  11821. fail:
  11822. kfree(state);
  11823. kfree(primary);
  11824. return NULL;
  11825. }
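/*
 * Create the rotation property on first use and attach it to @plane;
 * 90/270 degree rotation is only exposed on gen9+.
 */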
  11826. void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
  11827. {
  11828. if (!dev->mode_config.rotation_property) {
  11829. unsigned long flags = BIT(DRM_ROTATE_0) |
  11830. BIT(DRM_ROTATE_180);
  11831. if (INTEL_INFO(dev)->gen >= 9)
  11832. flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
  11833. dev->mode_config.rotation_property =
  11834. drm_mode_create_rotation_property(dev, flags);
  11835. }
  11836. if (dev->mode_config.rotation_property)
  11837. drm_object_attach_property(&plane->base.base,
  11838. dev->mode_config.rotation_property,
  11839. plane->base.state->rotation);
  11840. }
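/*
 * Atomic check for the cursor plane: no scaling, supported cursor sizes
 * only, an untiled buffer large enough for the power-of-two stride, and a
 * workaround rejecting cursors that straddle the left edge on CHV pipe C.
 */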
  11841. static int
  11842. intel_check_cursor_plane(struct drm_plane *plane,
  11843. struct intel_crtc_state *crtc_state,
  11844. struct intel_plane_state *state)
  11845. {
  11846. struct drm_crtc *crtc = crtc_state->base.crtc;
  11847. struct drm_framebuffer *fb = state->base.fb;
  11848. struct drm_i915_gem_object *obj = intel_fb_obj(fb);
  11849. enum pipe pipe = to_intel_plane(plane)->pipe;
  11850. unsigned stride;
  11851. int ret;
  11852. ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
  11853. &state->dst, &state->clip,
  11854. DRM_PLANE_HELPER_NO_SCALING,
  11855. DRM_PLANE_HELPER_NO_SCALING,
  11856. true, true, &state->visible);
  11857. if (ret)
  11858. return ret;
  11859. /* if we want to turn off the cursor, ignore width and height */
  11860. if (!obj)
  11861. return 0;
  11862. /* Check for which cursor types we support */
  11863. if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
  11864. DRM_DEBUG("Cursor dimension %dx%d not supported\n",
  11865. state->base.crtc_w, state->base.crtc_h);
  11866. return -EINVAL;
  11867. }
  11868. stride = roundup_pow_of_two(state->base.crtc_w) * 4;
  11869. if (obj->base.size < stride * state->base.crtc_h) {
  11870. DRM_DEBUG_KMS("buffer is too small\n");
  11871. return -ENOMEM;
  11872. }
  11873. if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
  11874. DRM_DEBUG_KMS("cursor cannot be tiled\n");
  11875. return -EINVAL;
  11876. }
  11877. /*
  11878. * There's something wrong with the cursor on CHV pipe C.
  11879. * If it straddles the left edge of the screen then
  11880. * moving it away from the edge or disabling it often
  11881. * results in a pipe underrun, and often that can lead to
  11882. * dead pipe (constant underrun reported, and it scans
  11883. * out just a solid color). To recover from that, the
  11884. * display power well must be turned off and on again.
  11885. * Refuse to put the cursor into that compromised position.
  11886. */
  11887. if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
  11888. state->visible && state->base.crtc_x < 0) {
  11889. DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
  11890. return -EINVAL;
  11891. }
  11892. return 0;
  11893. }
  11894. static void
  11895. intel_disable_cursor_plane(struct drm_plane *plane,
  11896. struct drm_crtc *crtc)
  11897. {
  11898. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11899. intel_crtc->cursor_addr = 0;
  11900. intel_crtc_update_cursor(crtc, NULL);
  11901. }
  11902. static void
  11903. intel_update_cursor_plane(struct drm_plane *plane,
  11904. const struct intel_crtc_state *crtc_state,
  11905. const struct intel_plane_state *state)
  11906. {
  11907. struct drm_crtc *crtc = crtc_state->base.crtc;
  11908. struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  11909. struct drm_device *dev = plane->dev;
  11910. struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
  11911. uint32_t addr;
  11912. if (!obj)
  11913. addr = 0;
  11914. else if (!INTEL_INFO(dev)->cursor_needs_physical)
  11915. addr = i915_gem_obj_ggtt_offset(obj);
  11916. else
  11917. addr = obj->phys_handle->busaddr;
  11918. intel_crtc->cursor_addr = addr;
  11919. intel_crtc_update_cursor(crtc, state);
  11920. }
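/*
 * Create and initialise the cursor plane for @pipe, attaching the 0/180
 * degree rotation property on gen4+.
 */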
  11921. static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
  11922. int pipe)
  11923. {
  11924. struct intel_plane *cursor = NULL;
  11925. struct intel_plane_state *state = NULL;
  11926. int ret;
  11927. cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
  11928. if (!cursor)
  11929. goto fail;
  11930. state = intel_create_plane_state(&cursor->base);
  11931. if (!state)
  11932. goto fail;
  11933. cursor->base.state = &state->base;
  11934. cursor->can_scale = false;
  11935. cursor->max_downscale = 1;
  11936. cursor->pipe = pipe;
  11937. cursor->plane = pipe;
  11938. cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
  11939. cursor->check_plane = intel_check_cursor_plane;
  11940. cursor->update_plane = intel_update_cursor_plane;
  11941. cursor->disable_plane = intel_disable_cursor_plane;
  11942. ret = drm_universal_plane_init(dev, &cursor->base, 0,
  11943. &intel_plane_funcs,
  11944. intel_cursor_formats,
  11945. ARRAY_SIZE(intel_cursor_formats),
  11946. DRM_PLANE_TYPE_CURSOR, NULL);
  11947. if (ret)
  11948. goto fail;
  11949. if (INTEL_INFO(dev)->gen >= 4) {
  11950. if (!dev->mode_config.rotation_property)
  11951. dev->mode_config.rotation_property =
  11952. drm_mode_create_rotation_property(dev,
  11953. BIT(DRM_ROTATE_0) |
  11954. BIT(DRM_ROTATE_180));
  11955. if (dev->mode_config.rotation_property)
  11956. drm_object_attach_property(&cursor->base.base,
  11957. dev->mode_config.rotation_property,
  11958. state->base.rotation);
  11959. }
  11960. if (INTEL_INFO(dev)->gen >= 9)
  11961. state->scaler_id = -1;
  11962. drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
  11963. return &cursor->base;
  11964. fail:
  11965. kfree(state);
  11966. kfree(cursor);
  11967. return NULL;
  11968. }
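/* Mark all of the CRTC's pipe scalers as unused and reset the scaler id. */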
  11969. static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
  11970. struct intel_crtc_state *crtc_state)
  11971. {
  11972. int i;
  11973. struct intel_scaler *intel_scaler;
  11974. struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
  11975. for (i = 0; i < intel_crtc->num_scalers; i++) {
  11976. intel_scaler = &scaler_state->scalers[i];
  11977. intel_scaler->in_use = 0;
  11978. intel_scaler->mode = PS_SCALER_MODE_DYN;
  11979. }
  11980. scaler_state->scaler_id = -1;
  11981. }
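/*
 * Allocate and register the CRTC for @pipe together with its primary plane,
 * cursor plane and initial atomic state.
 */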
  11982. static void intel_crtc_init(struct drm_device *dev, int pipe)
  11983. {
  11984. struct drm_i915_private *dev_priv = dev->dev_private;
  11985. struct intel_crtc *intel_crtc;
  11986. struct intel_crtc_state *crtc_state = NULL;
  11987. struct drm_plane *primary = NULL;
  11988. struct drm_plane *cursor = NULL;
  11989. int ret;
  11990. intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
  11991. if (intel_crtc == NULL)
  11992. return;
  11993. crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
  11994. if (!crtc_state)
  11995. goto fail;
  11996. intel_crtc->config = crtc_state;
  11997. intel_crtc->base.state = &crtc_state->base;
  11998. crtc_state->base.crtc = &intel_crtc->base;
  11999. /* initialize shared scalers */
  12000. if (INTEL_INFO(dev)->gen >= 9) {
  12001. if (pipe == PIPE_C)
  12002. intel_crtc->num_scalers = 1;
  12003. else
  12004. intel_crtc->num_scalers = SKL_NUM_SCALERS;
  12005. skl_init_scalers(dev, intel_crtc, crtc_state);
  12006. }
  12007. primary = intel_primary_plane_create(dev, pipe);
  12008. if (!primary)
  12009. goto fail;
  12010. cursor = intel_cursor_plane_create(dev, pipe);
  12011. if (!cursor)
  12012. goto fail;
  12013. ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
  12014. cursor, &intel_crtc_funcs, NULL);
  12015. if (ret)
  12016. goto fail;
  12017. /*
  12018. * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
  12019. * are hooked to pipe B. Hence we want plane A feeding pipe B.
  12020. */
  12021. intel_crtc->pipe = pipe;
  12022. intel_crtc->plane = pipe;
  12023. if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
  12024. DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
  12025. intel_crtc->plane = !pipe;
  12026. }
  12027. intel_crtc->cursor_base = ~0;
  12028. intel_crtc->cursor_cntl = ~0;
  12029. intel_crtc->cursor_size = ~0;
  12030. intel_crtc->wm.cxsr_allowed = true;
  12031. BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
  12032. dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
  12033. dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
  12034. dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
  12035. drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
  12036. intel_color_init(&intel_crtc->base);
  12037. WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
  12038. return;
  12039. fail:
  12040. if (primary)
  12041. drm_plane_cleanup(primary);
  12042. if (cursor)
  12043. drm_plane_cleanup(cursor);
  12044. kfree(crtc_state);
  12045. kfree(intel_crtc);
  12046. }
  12047. enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
  12048. {
  12049. struct drm_encoder *encoder = connector->base.encoder;
  12050. struct drm_device *dev = connector->base.dev;
  12051. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  12052. if (!encoder || WARN_ON(!encoder->crtc))
  12053. return INVALID_PIPE;
  12054. return to_intel_crtc(encoder->crtc)->pipe;
  12055. }
  12056. int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
  12057. struct drm_file *file)
  12058. {
  12059. struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
  12060. struct drm_crtc *drmmode_crtc;
  12061. struct intel_crtc *crtc;
  12062. drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
  12063. if (!drmmode_crtc) {
  12064. DRM_ERROR("no such CRTC id\n");
  12065. return -ENOENT;
  12066. }
  12067. crtc = to_intel_crtc(drmmode_crtc);
  12068. pipe_from_crtc_id->pipe = crtc->pipe;
  12069. return 0;
  12070. }
  12071. static int intel_encoder_clones(struct intel_encoder *encoder)
  12072. {
  12073. struct drm_device *dev = encoder->base.dev;
  12074. struct intel_encoder *source_encoder;
  12075. int index_mask = 0;
  12076. int entry = 0;
  12077. for_each_intel_encoder(dev, source_encoder) {
  12078. if (encoders_cloneable(encoder, source_encoder))
  12079. index_mask |= (1 << entry);
  12080. entry++;
  12081. }
  12082. return index_mask;
  12083. }
  12084. static bool has_edp_a(struct drm_device *dev)
  12085. {
  12086. struct drm_i915_private *dev_priv = dev->dev_private;
  12087. if (!IS_MOBILE(dev))
  12088. return false;
  12089. if ((I915_READ(DP_A) & DP_DETECTED) == 0)
  12090. return false;
  12091. if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
  12092. return false;
  12093. return true;
  12094. }
  12095. static bool intel_crt_present(struct drm_device *dev)
  12096. {
  12097. struct drm_i915_private *dev_priv = dev->dev_private;
  12098. if (INTEL_INFO(dev)->gen >= 9)
  12099. return false;
  12100. if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
  12101. return false;
  12102. if (IS_CHERRYVIEW(dev))
  12103. return false;
  12104. if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
  12105. return false;
  12106. /* DDI E can't be used if DDI A requires 4 lanes */
  12107. if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
  12108. return false;
  12109. if (!dev_priv->vbt.int_crt_support)
  12110. return false;
  12111. return true;
  12112. }
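/*
 * Probe the output ports present on this platform (DDI, PCH, VLV/CHV, SDVO,
 * DP, HDMI, DSI, LVDS, CRT, TV, DVO) and register an encoder for each port
 * that is detected or declared by the VBT.
 */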
  12113. static void intel_setup_outputs(struct drm_device *dev)
  12114. {
  12115. struct drm_i915_private *dev_priv = dev->dev_private;
  12116. struct intel_encoder *encoder;
  12117. bool dpd_is_edp = false;
  12118. intel_lvds_init(dev);
  12119. if (intel_crt_present(dev))
  12120. intel_crt_init(dev);
  12121. if (IS_BROXTON(dev)) {
  12122. /*
  12123. * FIXME: Broxton doesn't support port detection via the
  12124. * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
  12125. * detect the ports.
  12126. */
  12127. intel_ddi_init(dev, PORT_A);
  12128. intel_ddi_init(dev, PORT_B);
  12129. intel_ddi_init(dev, PORT_C);
  12130. intel_dsi_init(dev);
  12131. } else if (HAS_DDI(dev)) {
  12132. int found;
  12133. /*
  12134. * Haswell uses DDI functions to detect digital outputs.
  12135. * On SKL pre-D0 the strap isn't connected, so we assume
  12136. * it's there.
  12137. */
  12138. found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
  12139. /* WaIgnoreDDIAStrap: skl */
  12140. if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
  12141. intel_ddi_init(dev, PORT_A);
  12142. /* DDI B, C and D detection is indicated by the SFUSE_STRAP
  12143. * register */
  12144. found = I915_READ(SFUSE_STRAP);
  12145. if (found & SFUSE_STRAP_DDIB_DETECTED)
  12146. intel_ddi_init(dev, PORT_B);
  12147. if (found & SFUSE_STRAP_DDIC_DETECTED)
  12148. intel_ddi_init(dev, PORT_C);
  12149. if (found & SFUSE_STRAP_DDID_DETECTED)
  12150. intel_ddi_init(dev, PORT_D);
  12151. /*
  12152. * On SKL we don't have a way to detect DDI-E so we rely on VBT.
  12153. */
  12154. if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
  12155. (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
  12156. dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
  12157. dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
  12158. intel_ddi_init(dev, PORT_E);
  12159. } else if (HAS_PCH_SPLIT(dev)) {
  12160. int found;
  12161. dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
  12162. if (has_edp_a(dev))
  12163. intel_dp_init(dev, DP_A, PORT_A);
  12164. if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
  12165. /* PCH SDVOB multiplex with HDMIB */
  12166. found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
  12167. if (!found)
  12168. intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
  12169. if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
  12170. intel_dp_init(dev, PCH_DP_B, PORT_B);
  12171. }
  12172. if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
  12173. intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
  12174. if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
  12175. intel_hdmi_init(dev, PCH_HDMID, PORT_D);
  12176. if (I915_READ(PCH_DP_C) & DP_DETECTED)
  12177. intel_dp_init(dev, PCH_DP_C, PORT_C);
  12178. if (I915_READ(PCH_DP_D) & DP_DETECTED)
  12179. intel_dp_init(dev, PCH_DP_D, PORT_D);
  12180. } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
  12181. bool has_edp, has_port;
  12182. /*
  12183. * The DP_DETECTED bit is the latched state of the DDC
  12184. * SDA pin at boot. However since eDP doesn't require DDC
  12185. * (no way to plug in a DP->HDMI dongle) the DDC pins for
  12186. * eDP ports may have been muxed to an alternate function.
  12187. * Thus we can't rely on the DP_DETECTED bit alone to detect
  12188. * eDP ports. Consult the VBT as well as DP_DETECTED to
  12189. * detect eDP ports.
  12190. *
  12191. * Sadly the straps seem to be missing sometimes even for HDMI
  12192. * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
  12193. * and VBT for the presence of the port. Additionally we can't
  12194. * trust the port type the VBT declares as we've seen at least
  12195. * HDMI ports that the VBT claim are DP or eDP.
  12196. */
  12197. has_edp = intel_dp_is_edp(dev, PORT_B);
  12198. has_port = intel_bios_is_port_present(dev_priv, PORT_B);
  12199. if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
  12200. has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
  12201. if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
  12202. intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
  12203. has_edp = intel_dp_is_edp(dev, PORT_C);
  12204. has_port = intel_bios_is_port_present(dev_priv, PORT_C);
  12205. if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
  12206. has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
  12207. if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
  12208. intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
  12209. if (IS_CHERRYVIEW(dev)) {
  12210. /*
  12211. * eDP not supported on port D,
  12212. * so no need to worry about it
  12213. */
  12214. has_port = intel_bios_is_port_present(dev_priv, PORT_D);
  12215. if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
  12216. intel_dp_init(dev, CHV_DP_D, PORT_D);
  12217. if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
  12218. intel_hdmi_init(dev, CHV_HDMID, PORT_D);
  12219. }
  12220. intel_dsi_init(dev);
  12221. } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
  12222. bool found = false;
  12223. if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  12224. DRM_DEBUG_KMS("probing SDVOB\n");
  12225. found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
  12226. if (!found && IS_G4X(dev)) {
  12227. DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
  12228. intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
  12229. }
  12230. if (!found && IS_G4X(dev))
  12231. intel_dp_init(dev, DP_B, PORT_B);
  12232. }
  12233. /* Before G4X, SDVOC doesn't have its own detect register */
  12234. if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
  12235. DRM_DEBUG_KMS("probing SDVOC\n");
  12236. found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
  12237. }
  12238. if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
  12239. if (IS_G4X(dev)) {
  12240. DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
  12241. intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
  12242. }
  12243. if (IS_G4X(dev))
  12244. intel_dp_init(dev, DP_C, PORT_C);
  12245. }
  12246. if (IS_G4X(dev) &&
  12247. (I915_READ(DP_D) & DP_DETECTED))
  12248. intel_dp_init(dev, DP_D, PORT_D);
  12249. } else if (IS_GEN2(dev))
  12250. intel_dvo_init(dev);
  12251. if (SUPPORTS_TV(dev))
  12252. intel_tv_init(dev);
  12253. intel_psr_init(dev);
  12254. for_each_intel_encoder(dev, encoder) {
  12255. encoder->base.possible_crtcs = encoder->crtc_mask;
  12256. encoder->base.possible_clones =
  12257. intel_encoder_clones(encoder);
  12258. }
  12259. intel_init_pch_refclk(dev);
  12260. drm_helper_move_panel_connectors_to_head(dev);
  12261. }
  12262. static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  12263. {
  12264. struct drm_device *dev = fb->dev;
  12265. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  12266. drm_framebuffer_cleanup(fb);
  12267. mutex_lock(&dev->struct_mutex);
  12268. WARN_ON(!intel_fb->obj->framebuffer_references--);
  12269. drm_gem_object_unreference(&intel_fb->obj->base);
  12270. mutex_unlock(&dev->struct_mutex);
  12271. kfree(intel_fb);
  12272. }
  12273. static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
  12274. struct drm_file *file,
  12275. unsigned int *handle)
  12276. {
  12277. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  12278. struct drm_i915_gem_object *obj = intel_fb->obj;
  12279. if (obj->userptr.mm) {
  12280. DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
  12281. return -EINVAL;
  12282. }
  12283. return drm_gem_handle_create(file, &obj->base, handle);
  12284. }
  12285. static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
  12286. struct drm_file *file,
  12287. unsigned flags, unsigned color,
  12288. struct drm_clip_rect *clips,
  12289. unsigned num_clips)
  12290. {
  12291. struct drm_device *dev = fb->dev;
  12292. struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  12293. struct drm_i915_gem_object *obj = intel_fb->obj;
  12294. mutex_lock(&dev->struct_mutex);
  12295. intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
  12296. mutex_unlock(&dev->struct_mutex);
  12297. return 0;
  12298. }
  12299. static const struct drm_framebuffer_funcs intel_fb_funcs = {
  12300. .destroy = intel_user_framebuffer_destroy,
  12301. .create_handle = intel_user_framebuffer_create_handle,
  12302. .dirty = intel_user_framebuffer_dirty,
  12303. };
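/*
 * Maximum framebuffer pitch in bytes for the given hardware generation,
 * tiling modifier and pixel format.
 */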
  12304. static
  12305. u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
  12306. uint32_t pixel_format)
  12307. {
  12308. u32 gen = INTEL_INFO(dev)->gen;
  12309. if (gen >= 9) {
  12310. int cpp = drm_format_plane_cpp(pixel_format, 0);
  12311. /* "The stride in bytes must not exceed the of the size of 8K
  12312. * pixels and 32K bytes."
  12313. */
  12314. return min(8192 * cpp, 32768);
  12315. } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
  12316. return 32*1024;
  12317. } else if (gen >= 4) {
  12318. if (fb_modifier == I915_FORMAT_MOD_X_TILED)
  12319. return 16*1024;
  12320. else
  12321. return 32*1024;
  12322. } else if (gen >= 3) {
  12323. if (fb_modifier == I915_FORMAT_MOD_X_TILED)
  12324. return 8*1024;
  12325. else
  12326. return 16*1024;
  12327. } else {
  12328. /* XXX DSPC is limited to 4k tiled */
  12329. return 8*1024;
  12330. }
  12331. }
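/*
 * Validate a new framebuffer's modifier, tiling, stride alignment, pitch,
 * pixel format and size against what this generation supports, then register
 * it with DRM.
 */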
  12332. static int intel_framebuffer_init(struct drm_device *dev,
  12333. struct intel_framebuffer *intel_fb,
  12334. struct drm_mode_fb_cmd2 *mode_cmd,
  12335. struct drm_i915_gem_object *obj)
  12336. {
  12337. struct drm_i915_private *dev_priv = to_i915(dev);
  12338. unsigned int aligned_height;
  12339. int ret;
  12340. u32 pitch_limit, stride_alignment;
  12341. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  12342. if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
  12343. /* Enforce that fb modifier and tiling mode match, but only for
  12344. * X-tiled. This is needed for FBC. */
  12345. if (!!(obj->tiling_mode == I915_TILING_X) !=
  12346. !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
  12347. DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
  12348. return -EINVAL;
  12349. }
  12350. } else {
  12351. if (obj->tiling_mode == I915_TILING_X)
  12352. mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
  12353. else if (obj->tiling_mode == I915_TILING_Y) {
  12354. DRM_DEBUG("No Y tiling for legacy addfb\n");
  12355. return -EINVAL;
  12356. }
  12357. }
  12358. /* Sanity-check the passed-in modifier. */
  12359. switch (mode_cmd->modifier[0]) {
  12360. case I915_FORMAT_MOD_Y_TILED:
  12361. case I915_FORMAT_MOD_Yf_TILED:
  12362. if (INTEL_INFO(dev)->gen < 9) {
  12363. DRM_DEBUG("Unsupported tiling 0x%llx!\n",
  12364. mode_cmd->modifier[0]);
  12365. return -EINVAL;
  12366. }
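/* fall through */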
  12367. case DRM_FORMAT_MOD_NONE:
  12368. case I915_FORMAT_MOD_X_TILED:
  12369. break;
  12370. default:
  12371. DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
  12372. mode_cmd->modifier[0]);
  12373. return -EINVAL;
  12374. }
  12375. stride_alignment = intel_fb_stride_alignment(dev_priv,
  12376. mode_cmd->modifier[0],
  12377. mode_cmd->pixel_format);
  12378. if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
  12379. DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
  12380. mode_cmd->pitches[0], stride_alignment);
  12381. return -EINVAL;
  12382. }
  12383. pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
  12384. mode_cmd->pixel_format);
  12385. if (mode_cmd->pitches[0] > pitch_limit) {
  12386. DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
  12387. mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
  12388. "tiled" : "linear",
  12389. mode_cmd->pitches[0], pitch_limit);
  12390. return -EINVAL;
  12391. }
  12392. if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
  12393. mode_cmd->pitches[0] != obj->stride) {
  12394. DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
  12395. mode_cmd->pitches[0], obj->stride);
  12396. return -EINVAL;
  12397. }
	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	intel_fill_fb_info(dev_priv, &intel_fb->base);

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}

#ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}
	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev_priv)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;
	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;
	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
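
/*
 * PCI-ID based quirk table. intel_init_quirks() matches each entry against
 * the device ID and the PCI subsystem vendor/device IDs (PCI_ANY_ID acts as
 * a wildcard) and runs the hook for every match.
 */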
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
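	/* Setting bit 5 of sequencer register SR01 turns the VGA screen off
	 * before the VGA display plane itself is disabled below. */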
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_update_cdclk(dev);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto fail;
	}

	/* Write calculated watermark values back */
	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(cs);
	}

	drm_atomic_state_free(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_rawclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on pipe A; we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return true;

	return false;
}
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this; the update_dpms
		 * call below restores the pipe to the right state, but leaves
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}

static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	plane_state->visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (IS_BROADWELL(dev_priv)) {
				pixclk = ilk_pipe_pixel_rate(crtc_state);

				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
				if (crtc_state->ips_enabled)
					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
			} else if (IS_VALLEYVIEW(dev_priv) ||
				   IS_CHERRYVIEW(dev_priv) ||
				   IS_BROXTON(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}
/* Scan out the current hw modeset state
 * and sanitize it to match the current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	bool setup = false;

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	for_each_intel_connector(dev, connector)
		connector->unregister(connector);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	intel_cleanup_gt_powersave(dev);

	intel_teardown_gmbus(dev);
}
/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
			err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}